From f40c16581d23447a4ca92b1620610a7c05c72b87 Mon Sep 17 00:00:00 2001 From: Fedor Sakharov Date: Wed, 24 Jun 2020 13:07:17 +0300 Subject: [PATCH 01/18] Updates guide for CandidateBacking --- .../src/node/backing/candidate-backing.md | 79 +++++++++++++------ .../src/types/overseer-protocol.md | 2 + 2 files changed, 55 insertions(+), 26 deletions(-) diff --git a/roadmap/implementors-guide/src/node/backing/candidate-backing.md b/roadmap/implementors-guide/src/node/backing/candidate-backing.md index c57680e6356c..0aed8281b465 100644 --- a/roadmap/implementors-guide/src/node/backing/candidate-backing.md +++ b/roadmap/implementors-guide/src/node/backing/candidate-backing.md @@ -2,30 +2,41 @@ The Candidate Backing subsystem ensures every parablock considered for relay block inclusion has been seconded by at least one validator, and approved by a quorum. Parablocks for which no validator will assert correctness are discarded. If the block later proves invalid, the initial backers are slashable; this gives polkadot a rational threat model during subsequent stages. -Its role is to produce backable candidates for inclusion in new relay-chain blocks. It does so by issuing signed [`Statement`s](../../types/backing.md#statement-type) and tracking received statements signed by other validators. Once enough statements are received, they can be combined into backing for specific candidates. +Its role is to produce backable candidates for inclusion in new relay-chain blocks. It does so by issuing signed [`Statement`s][Statement] and tracking received statements signed by other validators. Once enough statements are received, they can be combined into backing for specific candidates. Note that though the candidate backing subsystem attempts to produce as many backable candidates as possible, it does _not_ attempt to choose a single authoritative one. 
The choice of which actually gets included is ultimately up to the block author, by whatever metrics it may use; those are opaque to this subsystem. -Once a sufficient quorum has agreed that a candidate is valid, this subsystem notifies the [Provisioner](../utility/provisioner.md), which in turn engages block production mechanisms to include the parablock. +Once a sufficient quorum has agreed that a candidate is valid, this subsystem notifies the [Provisioner][PV], which in turn engages block production mechanisms to include the parablock. ## Protocol -The [Candidate Selection subsystem](candidate-selection.md) is the primary source of non-overseer messages into this subsystem. That subsystem generates appropriate [`CandidateBackingMessage`s](../../types/overseer-protocol.md#candidate-backing-message), and passes them to this subsystem. +Input: [`CandidateBackingMessage`][CBM] -This subsystem validates the candidates and generates an appropriate [`Statement`](../../types/backing.md#statement-type). All `Statement`s are then passed on to the [Statement Distribution subsystem](statement-distribution.md) to be gossiped to peers. When this subsystem decides that a candidate is invalid, and it was recommended to us to second by our own Candidate Selection subsystem, a message is sent to the Candidate Selection subsystem with the candidate's hash so that the collator which recommended it can be penalized. +Output: + +- [`CandidateValidationMessage`][CVM] +- [`RuntimeApiMessage`][RAM] +- [`CandidateSelectionMessage`][CSM] +- [`ProvisionerMessage`][PM] +- PoV Distribution (Statement Distribution) ## Functionality +The [Candidate Selection][CS] subsystem is the primary source of non-overseer messages into this subsystem. That subsystem generates appropriate [`CandidateBackingMessage`s][CBM] and passes them to this subsystem. + +This subsystem requests validation from the [Candidate Validation][CV] and generates an appropriate [`Statement`][Statement]. 
All `Statement`s are then passed on to the [Statement Distribution][SD] subsystem to be gossiped to peers. When [Candidate Validation][CV] decides that a candidate is invalid, and it was recommended to us to second by our own [Candidate Selection][CS] subsystem, a message is sent to the [Candidate Selection][CS] subsystem with the candidate's hash so that the collator which recommended it can be penalized. + The subsystem should maintain a set of handles to Candidate Backing Jobs that are currently live, as well as the relay-parent to which they correspond. ### On Overseer Signal -* If the signal is an [`OverseerSignal`](../../types/overseer-protocol.md#overseer-signal)`::StartWork(relay_parent)`, spawn a Candidate Backing Job with the given relay parent, storing a bidirectional channel with the Candidate Backing Job in the set of handles. -* If the signal is an [`OverseerSignal`](../../types/overseer-protocol.md#overseer-signal)`::StopWork(relay_parent)`, cease the Candidate Backing Job under that relay parent, if any. +* If the signal is an [`OverseerSignal`][OverseerSignal]`::StartWork(relay_parent)`, spawn a Candidate Backing Job with the given relay parent, storing a bidirectional channel with the Candidate Backing Job in the set of handles. +* If the signal is an [`OverseerSignal`][OverseerSignal]`::StopWork(relay_parent)`, cease the Candidate Backing Job under that relay parent, if any. -### On `CandidateBackingMessage` +### On Receiving `CandidateBackingMessage` -* If the message corresponds to a particular relay-parent, forward the message to the Candidate Backing Job for that relay-parent, if any is live. +* If the message is a [`CandidateBackingMessage`][CBM]`::RegisterBackingWatcher`, register the watcher and trigger it each time a new candidate is backable. Also trigger it once initially if there are any backable candidates at the time of receipt. 
+* If the message is a [`CandidateBackingMessage`][CBM]`::Second`, sign and dispatch a `Seconded` statement only if we have not seconded any other candidate and have not signed a `Valid` statement for the requested candidate. Signing both a `Seconded` and `Valid` message is a double-voting misbehavior with a heavy penalty, and this could occur if another validator has seconded the same candidate and we've received their message before the internal seconding request. > big TODO: "contextual execution" > @@ -39,36 +50,37 @@ The subsystem should maintain a set of handles to Candidate Backing Jobs that ar The Candidate Backing Job represents the work a node does for backing candidates with respect to a particular relay-parent. -The goal of a Candidate Backing Job is to produce as many backable candidates as possible. This is done via signed [`Statement`s](../../types/backing.md#statement-type) by validators. If a candidate receives a majority of supporting Statements from the Parachain Validators currently assigned, then that candidate is considered backable. +The goal of a Candidate Backing Job is to produce as many backable candidates as possible. This is done via signed [`Statement`s][STMT] by validators. If a candidate receives a majority of supporting Statements from the Parachain Validators currently assigned, then that candidate is considered backable. ### On Startup -* Fetch current validator set, validator -> parachain assignments from runtime API. +* Fetch current validator set, validator -> parachain assignments from [`Runtime API`][RA] subsystem using [`RuntimeApiRequest::Validators`][RAM] and [`RuntimeApiRequest::DutyRoster`][RAM] * Determine if the node controls a key in the current validator set. Call this the local key if so. -* If the local key exists, extract the parachain head and validation function for the parachain the local key is assigned to. 
+* If the local key exists, extract the parachain head and validation function from the [`Runtime API`][RA] for the parachain the local key is assigned to by issuing a [`RuntimeApiRequest::Validators`][RAM] +* Issue a [`RuntimeApiRequest::SigningContext`][RAM] message to get a context that will later be used upon signing. -### On Receiving New Signed Statement +### On Receiving New Candidate Backing Message -```rust -if let Statement::Seconded(candidate) = signed.statement { - if candidate is unknown and in local assignment { - spawn_validation_work(candidate, parachain head, validation function) +```rust,ignore +match msg { + CandidateBackingMessage::Second(hash, candidate) => { + if candidate is unknown and in local assignment { + spawn_validation_work(candidate, parachain head, validation function) + } } } - -// add `Seconded` statements and `Valid` statements to a quorum. If quorum reaches validator-group -// majority, send a `BlockAuthorshipProvisioning::BackableCandidate(relay_parent, Candidate, Backing)` message. ``` -### Spawning Validation Work +Add `Seconded` statements and `Valid` statements to a quorum. If quorum reaches validator-group majority, send a [`ProvisionerMessage::ProvisionableData(ProvisionableData::BackedCandidate(BackedCandidate))`][PM] message. + +### Validating Candidates. -```rust +```rust,ignore fn spawn_validation_work(candidate, parachain head, validation function) { asynchronously { let pov = (fetch pov block).await - // dispatched to sub-process (OS process) pool. - let valid = validate_candidate(candidate, validation function, parachain head, pov).await; + let valid = (validate pov block).await; if valid { // make PoV available for later distribution. Send data to the availability store to keep. // sign and dispatch `valid` statement to network if we have not seconded the given candidate. @@ -84,9 +96,24 @@ fn spawn_validation_work(candidate, parachain head, validation function) { Create a `(sender, receiver)` pair. 
Dispatch a `PovFetchSubsystemMessage(relay_parent, candidate_hash, sender)` and listen on the receiver for a response. -### On Receiving `CandidateBackingMessage` +### Validate PoV Block -* If the message is a `CandidateBackingMessage::RegisterBackingWatcher`, register the watcher and trigger it each time a new candidate is backable. Also trigger it once initially if there are any backable candidates at the time of receipt. -* If the message is a `CandidateBackingMessage::Second`, sign and dispatch a `Seconded` statement only if we have not seconded any other candidate and have not signed a `Valid` statement for the requested candidate. Signing both a `Seconded` and `Valid` message is a double-voting misbehavior with a heavy penalty, and this could occur if another validator has seconded the same candidate and we've received their message before the internal seconding request. +Create a `(sender, receiver)` pair. +Dispatch a `CandidateValidationMessage::Validate(validation function, candidate, pov, sender)` and listen on the receiver for a response. 
> TODO: send statements to Statement Distribution subsystem, handle shutdown signal from candidate backing subsystem + +[OverseerSignal]: ../../types/overseer-protocol.md#overseer-signal +[Statement]: ../../types/backing.md#statement-type +[STMT]: ../../types/backing.md#statement-type +[CSM]: ../../types/overseer-protocol.md#candidate-selection-message +[RAM]: ../../types/overseer-protocol.md#runtime-api-message +[CVM]: ../../types/overseer-protocol.md#validation-request-type +[PM]: ../../types/overseer-protocol.md#provisioner-message +[CBM]: ../../types/overseer-protocol.md#candidate-backing-message + +[CS]: candidate-selection.md +[CV]: ../utility/candidate-validation.md +[SD]: statement-distribution.md +[RA]: ../utility/runtime-api.md +[PV]: ../utility/provisioner.md diff --git a/roadmap/implementors-guide/src/types/overseer-protocol.md b/roadmap/implementors-guide/src/types/overseer-protocol.md index 6a1bdfa96ed9..3af04dd8c7fc 100644 --- a/roadmap/implementors-guide/src/types/overseer-protocol.md +++ b/roadmap/implementors-guide/src/types/overseer-protocol.md @@ -211,6 +211,8 @@ Other subsystems query this data by sending these messages. enum RuntimeApiRequest { /// Get the current validator set. Validators(ResponseChannel>), + /// Get the assignments of validators to parachains. + DutyRoster(ResponseChannel), /// Get a signing context for bitfields and statements. 
SigningContext(ResponseChannel), /// Get the validation code for a specific para, assuming execution under given block number, and From 4031bc70d761b86a47f070483ac47293fbcb454a Mon Sep 17 00:00:00 2001 From: Fedor Sakharov Date: Thu, 25 Jun 2020 10:44:09 +0300 Subject: [PATCH 02/18] Move assignment types to primitives --- node/messages/src/lib.rs | 15 +++ primitives/src/parachain.rs | 94 +++++++++++++++++++ .../src/node/backing/candidate-backing.md | 8 +- .../src/types/overseer-protocol.md | 17 +++- runtime/parachains/src/inclusion.rs | 4 +- runtime/parachains/src/scheduler.rs | 92 +----------------- 6 files changed, 134 insertions(+), 96 deletions(-) diff --git a/node/messages/src/lib.rs b/node/messages/src/lib.rs index 3a413f2c67bb..68eb3a4c4b99 100644 --- a/node/messages/src/lib.rs +++ b/node/messages/src/lib.rs @@ -29,6 +29,7 @@ use polkadot_primitives::{BlockNumber, Hash, Signature}; use polkadot_primitives::parachain::{ AbridgedCandidateReceipt, PoVBlock, ErasureChunk, BackedCandidate, Id as ParaId, SignedAvailabilityBitfield, SigningContext, ValidatorId, ValidationCode, ValidatorIndex, + CoreAssignment, CoreOccupied, }; use polkadot_node_primitives::{ MisbehaviorReport, SignedFullStatement, @@ -158,10 +159,24 @@ pub enum AvailabilityStoreMessage { StoreChunk(Hash, ValidatorIndex, ErasureChunk), } +/// The information on scheduler assignments that some somesystems may be querying. +pub struct SchedulerRoster { + /// Validator-to-groups assignments. + pub validator_groups: Vec>, + /// All scheduled paras. + pub scheduled: Vec, + /// Upcoming paras (chains and threads). + pub upcoming: Vec, + /// Occupied cores. + pub availability_cores: Vec>, +} + /// A request to the Runtime API subsystem. pub enum RuntimeApiRequest { /// Get the current validator set. Validators(oneshot::Sender>), + /// Get the assignments of validators to cores. + ValidatorGroups(oneshot::Sender), /// Get a signing context for bitfields and statements. 
SigningContext(oneshot::Sender), /// Get the validation code for a specific para, assuming execution under given block number, and diff --git a/primitives/src/parachain.rs b/primitives/src/parachain.rs index a24cb5b612de..7055f8590fc8 100644 --- a/primitives/src/parachain.rs +++ b/primitives/src/parachain.rs @@ -170,6 +170,100 @@ pub struct DutyRoster { pub validator_duty: Vec, } +/// The unique (during session) index of a core. +#[derive(Encode, Decode, Default, PartialOrd, Ord, Eq, PartialEq, Clone, Copy)] +#[cfg_attr(test, derive(Debug))] +pub struct CoreIndex(pub u32); + +impl From for CoreIndex { + fn from(i: u32) -> CoreIndex { + CoreIndex(i) + } +} + +/// The unique (during session) index of a validator group. +#[derive(Encode, Decode, Default, Clone, Copy)] +#[cfg_attr(test, derive(PartialEq, Debug))] +pub struct GroupIndex(pub u32); + +impl From for GroupIndex { + fn from(i: u32) -> GroupIndex { + GroupIndex(i) + } +} + +/// A claim on authoring the next block for a given parathread. +#[derive(Clone, Encode, Decode, Default)] +#[cfg_attr(test, derive(PartialEq, Debug))] +pub struct ParathreadClaim(pub Id, pub CollatorId); + +/// An entry tracking a claim to ensure it does not pass the maximum number of retries. +#[derive(Clone, Encode, Decode, Default)] +#[cfg_attr(test, derive(PartialEq, Debug))] +pub struct ParathreadEntry { + /// The claim. + pub claim: ParathreadClaim, + /// Number of retries. + pub retries: u32, +} + +/// What is occupying a specific availability core. +#[derive(Clone, Encode, Decode)] +#[cfg_attr(test, derive(PartialEq, Debug))] +pub enum CoreOccupied { + /// A parathread. + Parathread(ParathreadEntry), + /// A parachain. + Parachain, +} + +/// The assignment type. +#[derive(Clone, Encode, Decode)] +#[cfg_attr(test, derive(PartialEq, Debug))] +pub enum AssignmentKind { + /// A parachain. + Parachain, + /// A parathread. + Parathread(CollatorId, u32), +} + +/// How a free core is scheduled to be assigned. 
+#[derive(Clone, Encode, Decode)] +#[cfg_attr(test, derive(PartialEq, Debug))] +pub struct CoreAssignment { + /// The core that is assigned. + pub core: CoreIndex, + /// The unique ID of the para that is assigned to the core. + pub para_id: Id, + /// The kind of the assignment. + pub kind: AssignmentKind, + /// The index of the validator group assigned to the core. + pub group_idx: GroupIndex, +} + +impl CoreAssignment { + /// Get the ID of a collator who is required to collate this block. + pub fn required_collator(&self) -> Option<&CollatorId> { + match self.kind { + AssignmentKind::Parachain => None, + AssignmentKind::Parathread(ref id, _) => Some(id), + } + } + + /// Get the `CoreOccupied` from this. + pub fn to_core_occupied(&self) -> CoreOccupied { + match self.kind { + AssignmentKind::Parachain => CoreOccupied::Parachain, + AssignmentKind::Parathread(ref collator, retries) => CoreOccupied::Parathread( + ParathreadEntry { + claim: ParathreadClaim(self.para_id, collator.clone()), + retries, + } + ), + } + } +} + /// Extra data that is needed along with the other fields in a `CandidateReceipt` /// to fully validate the candidate. 
/// diff --git a/roadmap/implementors-guide/src/node/backing/candidate-backing.md b/roadmap/implementors-guide/src/node/backing/candidate-backing.md index 0aed8281b465..abf84397fb39 100644 --- a/roadmap/implementors-guide/src/node/backing/candidate-backing.md +++ b/roadmap/implementors-guide/src/node/backing/candidate-backing.md @@ -54,14 +54,14 @@ The goal of a Candidate Backing Job is to produce as many backable candidates as ### On Startup -* Fetch current validator set, validator -> parachain assignments from [`Runtime API`][RA] subsystem using [`RuntimeApiRequest::Validators`][RAM] and [`RuntimeApiRequest::DutyRoster`][RAM] +* Fetch current validator set, validator -> parachain assignments from [`Runtime API`][RA] subsystem using [`RuntimeApiRequest::Validators`][RAM] and [`RuntimeApiRequest::ValidatorGroups`][RAM] * Determine if the node controls a key in the current validator set. Call this the local key if so. * If the local key exists, extract the parachain head and validation function from the [`Runtime API`][RA] for the parachain the local key is assigned to by issuing a [`RuntimeApiRequest::Validators`][RAM] * Issue a [`RuntimeApiRequest::SigningContext`][RAM] message to get a context that will later be used upon signing. ### On Receiving New Candidate Backing Message -```rust,ignore +```rust match msg { CandidateBackingMessage::Second(hash, candidate) => { if candidate is unknown and in local assignment { @@ -71,11 +71,11 @@ match msg { } ``` -Add `Seconded` statements and `Valid` statements to a quorum. If quorum reaches validator-group majority, send a [`ProvisionerMessage::ProvisionableData(ProvisionableData::BackedCandidate(BackedCandidate))`][PM] message. +Add `Seconded` statements and `Valid` statements to a quorum. If quorum reaches validator-group majority, send a [`ProvisionerMessage`][PM]`::ProvisionableData(ProvisionableData::BackedCandidate(BackedCandidate))` message. ### Validating Candidates. 
-```rust,ignore +```rust fn spawn_validation_work(candidate, parachain head, validation function) { asynchronously { let pov = (fetch pov block).await diff --git a/roadmap/implementors-guide/src/types/overseer-protocol.md b/roadmap/implementors-guide/src/types/overseer-protocol.md index 3af04dd8c7fc..49c0f226adc2 100644 --- a/roadmap/implementors-guide/src/types/overseer-protocol.md +++ b/roadmap/implementors-guide/src/types/overseer-protocol.md @@ -208,11 +208,24 @@ The Runtime API subsystem is responsible for providing an interface to the state Other subsystems query this data by sending these messages. ```rust +/// The information on validator groups, core assignments, +/// upcoming paras and availability cores. +struct SchedulerRoster { + /// Validator-to-groups assignments. + validator_groups: Vec>, + /// All scheduled paras. + scheduled: Vec, + /// Upcoming paras (chains and threads). + upcoming: Vec, + /// Occupied cores. + availability_cores: Vec>, +} + enum RuntimeApiRequest { /// Get the current validator set. Validators(ResponseChannel>), - /// Get the assignments of validators to parachains. - DutyRoster(ResponseChannel), + /// Get the assignments of validators to cores, upcoming parachains. + SchedulerRoster(ResponseChannel), /// Get a signing context for bitfields and statements. 
SigningContext(ResponseChannel), /// Get the validation code for a specific para, assuming execution under given block number, and diff --git a/runtime/parachains/src/inclusion.rs b/runtime/parachains/src/inclusion.rs index c31c4a62e6e2..6f067d42f42f 100644 --- a/runtime/parachains/src/inclusion.rs +++ b/runtime/parachains/src/inclusion.rs @@ -25,7 +25,7 @@ use primitives::{ parachain::{ ValidatorId, AbridgedCandidateReceipt, ValidatorIndex, Id as ParaId, AvailabilityBitfield as AvailabilityBitfield, SignedAvailabilityBitfields, SigningContext, - BackedCandidate, + BackedCandidate, CoreIndex, GroupIndex, CoreAssignment, }, }; use frame_support::{ @@ -38,7 +38,7 @@ use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; use sp_staking::SessionIndex; use sp_runtime::{DispatchError, traits::{One, Saturating}}; -use crate::{configuration, paras, scheduler::{CoreIndex, GroupIndex, CoreAssignment}}; +use crate::{configuration, paras}; /// A bitfield signed by a validator indicating that it is keeping its piece of the erasure-coding /// for any backed candidates referred to by a `1` bit available. diff --git a/runtime/parachains/src/scheduler.rs b/runtime/parachains/src/scheduler.rs index 6539915946c8..7934f3618b08 100644 --- a/runtime/parachains/src/scheduler.rs +++ b/runtime/parachains/src/scheduler.rs @@ -38,7 +38,10 @@ use sp_std::prelude::*; use sp_std::convert::TryInto; use primitives::{ - parachain::{Id as ParaId, CollatorId, ValidatorIndex}, + parachain::{ + Id as ParaId, ValidatorIndex, CoreAssignment, CoreOccupied, CoreIndex, AssignmentKind, + GroupIndex, ParathreadClaim, ParathreadEntry, + }, }; use frame_support::{ decl_storage, decl_module, decl_error, @@ -52,41 +55,6 @@ use rand_chacha::ChaCha20Rng; use crate::{configuration, paras, initializer::SessionChangeNotification}; -/// The unique (during session) index of a core. 
-#[derive(Encode, Decode, Default, PartialOrd, Ord, Eq, PartialEq, Clone, Copy)] -#[cfg_attr(test, derive(Debug))] -pub struct CoreIndex(u32); - -impl From for CoreIndex { - fn from(i: u32) -> CoreIndex { - CoreIndex(i) - } -} - -/// The unique (during session) index of a validator group. -#[derive(Encode, Decode, Default, Clone, Copy)] -#[cfg_attr(test, derive(PartialEq, Debug))] -pub struct GroupIndex(u32); - -impl From for GroupIndex { - fn from(i: u32) -> GroupIndex { - GroupIndex(i) - } -} - -/// A claim on authoring the next block for a given parathread. -#[derive(Clone, Encode, Decode, Default)] -#[cfg_attr(test, derive(PartialEq, Debug))] -pub struct ParathreadClaim(pub ParaId, pub CollatorId); - -/// An entry tracking a claim to ensure it does not pass the maximum number of retries. -#[derive(Clone, Encode, Decode, Default)] -#[cfg_attr(test, derive(PartialEq, Debug))] -pub struct ParathreadEntry { - claim: ParathreadClaim, - retries: u32, -} - /// A queued parathread entry, pre-assigned to a core. #[derive(Encode, Decode, Default)] #[cfg_attr(test, derive(PartialEq, Debug))] @@ -125,58 +93,6 @@ impl ParathreadClaimQueue { } } -/// What is occupying a specific availability core. -#[derive(Clone, Encode, Decode)] -#[cfg_attr(test, derive(PartialEq, Debug))] -pub(crate) enum CoreOccupied { - Parathread(ParathreadEntry), - Parachain, -} - -/// The assignment type. -#[derive(Clone, Encode, Decode)] -#[cfg_attr(test, derive(PartialEq, Debug))] -pub enum AssignmentKind { - Parachain, - Parathread(CollatorId, u32), -} - -/// How a free core is scheduled to be assigned. -#[derive(Clone, Encode, Decode)] -#[cfg_attr(test, derive(PartialEq, Debug))] -pub struct CoreAssignment { - /// The core that is assigned. - pub core: CoreIndex, - /// The unique ID of the para that is assigned to the core. - pub para_id: ParaId, - /// The kind of the assignment. - pub kind: AssignmentKind, - /// The index of the validator group assigned to the core. 
- pub group_idx: GroupIndex, -} - -impl CoreAssignment { - /// Get the ID of a collator who is required to collate this block. - pub(crate) fn required_collator(&self) -> Option<&CollatorId> { - match self.kind { - AssignmentKind::Parachain => None, - AssignmentKind::Parathread(ref id, _) => Some(id), - } - } - - fn to_core_occupied(&self) -> CoreOccupied { - match self.kind { - AssignmentKind::Parachain => CoreOccupied::Parachain, - AssignmentKind::Parathread(ref collator, retries) => CoreOccupied::Parathread( - ParathreadEntry { - claim: ParathreadClaim(self.para_id, collator.clone()), - retries, - } - ), - } - } -} - /// Reasons a core might be freed pub enum FreedReason { /// The core's work concluded and the parablock assigned to it is considered available. From 45ead89f023d7a9935c968d79bc4a24ee2be5373 Mon Sep 17 00:00:00 2001 From: Fedor Sakharov Date: Sat, 27 Jun 2020 23:16:58 +0300 Subject: [PATCH 03/18] Initial implementation. --- Cargo.lock | 73 ++- Cargo.toml | 1 + node/core/backing/Cargo.toml | 26 + node/core/backing/src/lib.rs | 773 ++++++++++++++++++++++++++++ node/messages/src/lib.rs | 15 +- node/overseer/src/lib.rs | 22 + primitives/src/parachain.rs | 14 +- runtime/parachains/src/inclusion.rs | 3 +- runtime/parachains/src/scheduler.rs | 2 +- validation/src/shared_table/mod.rs | 1 + 10 files changed, 893 insertions(+), 37 deletions(-) create mode 100644 node/core/backing/Cargo.toml create mode 100644 node/core/backing/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 27217d39daa9..954c718d0631 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -224,6 +224,27 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" +[[package]] +name = "backing" +version = "0.1.0" +dependencies = [ + "derive_more 0.99.9", + "futures 0.3.5", + "futures-timer 3.0.2", + "log 0.4.8", + "polkadot-node-messages", + "polkadot-node-primitives", + "polkadot-overseer", + 
"polkadot-primitives", + "sc-client-api", + "sc-keystore", + "sp-api", + "sp-blockchain", + "sp-core", + "sp-keyring", + "streamunordered", +] + [[package]] name = "backtrace" version = "0.3.49" @@ -836,9 +857,9 @@ dependencies = [ [[package]] name = "derive_more" -version = "0.99.8" +version = "0.99.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc655351f820d774679da6cdc23355a93de496867d8203496675162e17b1d671" +checksum = "298998b1cf6b5b2c8a7b023dfd45821825ce3ba8a8af55c921a0e734e4653f76" dependencies = [ "proc-macro2 1.0.18", "quote 1.0.7", @@ -4135,7 +4156,7 @@ dependencies = [ name = "polkadot-availability-store" version = "0.8.12" dependencies = [ - "derive_more 0.99.8", + "derive_more 0.99.9", "exit-future", "futures 0.3.5", "kvdb", @@ -4311,7 +4332,7 @@ dependencies = [ name = "polkadot-parachain" version = "0.8.12" dependencies = [ - "derive_more 0.99.8", + "derive_more 0.99.9", "log 0.4.8", "parity-scale-codec", "parking_lot 0.10.2", @@ -5534,7 +5555,7 @@ version = "0.8.0-rc3" source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" dependencies = [ "bytes 0.5.4", - "derive_more 0.99.8", + "derive_more 0.99.9", "futures 0.3.5", "futures-timer 3.0.2", "libp2p", @@ -5630,7 +5651,7 @@ dependencies = [ "ansi_term 0.12.1", "atty", "chrono", - "derive_more 0.99.8", + "derive_more 0.99.9", "env_logger", "fdlimit", "futures 0.3.5", @@ -5667,7 +5688,7 @@ name = "sc-client-api" version = "2.0.0-rc3" source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" dependencies = [ - "derive_more 0.99.8", + "derive_more 0.99.9", "fnv", "futures 0.3.5", "hash-db", @@ -5743,7 +5764,7 @@ name = "sc-consensus-babe" version = "0.8.0-rc3" source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" dependencies = [ - "derive_more 0.99.8", + "derive_more 0.99.9", "fork-tree", "futures 0.3.5", "futures-timer 3.0.2", @@ -5785,7 +5806,7 @@ 
name = "sc-consensus-babe-rpc" version = "0.8.0-rc3" source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" dependencies = [ - "derive_more 0.99.8", + "derive_more 0.99.9", "futures 0.3.5", "jsonrpc-core", "jsonrpc-core-client", @@ -5858,7 +5879,7 @@ name = "sc-executor" version = "0.8.0-rc3" source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" dependencies = [ - "derive_more 0.99.8", + "derive_more 0.99.9", "lazy_static", "libsecp256k1", "log 0.4.8", @@ -5886,7 +5907,7 @@ name = "sc-executor-common" version = "0.8.0-rc3" source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" dependencies = [ - "derive_more 0.99.8", + "derive_more 0.99.9", "log 0.4.8", "parity-scale-codec", "parity-wasm", @@ -5940,7 +5961,7 @@ version = "0.8.0-rc3" source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" dependencies = [ "assert_matches", - "derive_more 0.99.8", + "derive_more 0.99.9", "finality-grandpa", "fork-tree", "futures 0.3.5", @@ -5977,7 +5998,7 @@ name = "sc-finality-grandpa-rpc" version = "0.8.0-rc3" source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" dependencies = [ - "derive_more 0.99.8", + "derive_more 0.99.9", "finality-grandpa", "futures 0.3.5", "jsonrpc-core", @@ -6013,7 +6034,7 @@ name = "sc-keystore" version = "2.0.0-rc3" source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" dependencies = [ - "derive_more 0.99.8", + "derive_more 0.99.9", "hex", "merlin", "parking_lot 0.10.2", @@ -6051,7 +6072,7 @@ dependencies = [ "bitflags", "bs58", "bytes 0.5.4", - "derive_more 0.99.8", + "derive_more 0.99.9", "either", "erased-serde", "fnv", @@ -6223,7 +6244,7 @@ name = "sc-rpc-api" version = "0.8.0-rc3" source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" dependencies = [ - "derive_more 
0.99.8", + "derive_more 0.99.9", "futures 0.3.5", "jsonrpc-core", "jsonrpc-core-client", @@ -6263,7 +6284,7 @@ name = "sc-service" version = "0.8.0-rc3" source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" dependencies = [ - "derive_more 0.99.8", + "derive_more 0.99.9", "directories", "exit-future", "futures 0.1.29", @@ -6379,7 +6400,7 @@ name = "sc-transaction-graph" version = "2.0.0-rc3" source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" dependencies = [ - "derive_more 0.99.8", + "derive_more 0.99.9", "futures 0.3.5", "linked-hash-map", "log 0.4.8", @@ -6399,7 +6420,7 @@ name = "sc-transaction-pool" version = "2.0.0-rc3" source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" dependencies = [ - "derive_more 0.99.8", + "derive_more 0.99.9", "futures 0.3.5", "futures-diagnose", "intervalier", @@ -6786,7 +6807,7 @@ name = "sp-allocator" version = "2.0.0-rc3" source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" dependencies = [ - "derive_more 0.99.8", + "derive_more 0.99.9", "log 0.4.8", "sp-core", "sp-std", @@ -6885,7 +6906,7 @@ name = "sp-blockchain" version = "2.0.0-rc3" source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" dependencies = [ - "derive_more 0.99.8", + "derive_more 0.99.9", "log 0.4.8", "lru", "parity-scale-codec", @@ -6910,7 +6931,7 @@ name = "sp-consensus" version = "0.8.0-rc3" source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" dependencies = [ - "derive_more 0.99.8", + "derive_more 0.99.9", "futures 0.3.5", "futures-timer 3.0.2", "libp2p", @@ -6981,7 +7002,7 @@ dependencies = [ "base58", "blake2-rfc", "byteorder", - "derive_more 0.99.8", + "derive_more 0.99.9", "ed25519-dalek", "futures 0.3.5", "hash-db", @@ -7076,7 +7097,7 @@ name = "sp-inherents" version = "2.0.0-rc3" source = 
"git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" dependencies = [ - "derive_more 0.99.8", + "derive_more 0.99.9", "parity-scale-codec", "parking_lot 0.10.2", "sp-core", @@ -7314,7 +7335,7 @@ name = "sp-transaction-pool" version = "2.0.0-rc3" source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" dependencies = [ - "derive_more 0.99.8", + "derive_more 0.99.9", "futures 0.3.5", "log 0.4.8", "parity-scale-codec", @@ -7558,7 +7579,7 @@ version = "0.8.0-rc3" source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" dependencies = [ "async-std", - "derive_more 0.99.8", + "derive_more 0.99.9", "futures-util", "hyper 0.13.6", "log 0.4.8", diff --git a/Cargo.toml b/Cargo.toml index f3204924187f..803383ee3cf5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -46,6 +46,7 @@ members = [ "node/overseer", "node/primitives", "node/service", + "node/core/backing", "parachain/test-parachains", "parachain/test-parachains/adder", diff --git a/node/core/backing/Cargo.toml b/node/core/backing/Cargo.toml new file mode 100644 index 000000000000..a04a602fc3b0 --- /dev/null +++ b/node/core/backing/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "backing" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" + +[dependencies] +futures = "0.3.5" +log = "0.4.8" +sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" } +keystore = { package = "sc-keystore", git = "https://github.com/paritytech/substrate", branch = "master" } +primitives = { package = "sp-core", git = "https://github.com/paritytech/substrate", branch = "master" } + +polkadot-primitives = { path = "../../../primitives" } +polkadot-node-primitives = { path = "../../primitives" } +polkadot-overseer = { path = 
"../../overseer" } +messages = { package = "polkadot-node-messages", path = "../../messages" } +futures-timer = "3.0.2" +streamunordered = "0.5.1" +derive_more = "0.99.9" + +[dev-dependencies] +sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } +futures = { version = "0.3.5", features = ["thread-pool"] } diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs new file mode 100644 index 000000000000..4123cb86db3f --- /dev/null +++ b/node/core/backing/src/lib.rs @@ -0,0 +1,773 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Implements a `CandidateBackingSubsystem`. 
+ +#![recursion_limit="256"] + +use std::collections::{HashMap, HashSet}; +use std::pin::Pin; +use std::sync::Arc; +use std::time::Duration; + +use log; +use futures::{ + select, FutureExt, SinkExt, StreamExt, + channel::{oneshot, mpsc}, + future::{self, Either}, + task::{Spawn, SpawnExt}, +}; +use futures_timer::Delay; +use streamunordered::{StreamUnordered, StreamYield}; + +use primitives::Pair; +use keystore::KeyStorePtr; +use polkadot_primitives::{ + Hash, BlockNumber, + parachain::{ + AbridgedCandidateReceipt, Id as ParaId, ValidatorPair, ValidatorId, ValidatorIndex, + HeadData, ValidationCode, SigningContext, PoVBlock, GroupIndex, + }, +}; +use polkadot_overseer::{Subsystem, SubsystemContext, SpawnedSubsystem}; +use polkadot_node_primitives::{Statement, SignedFullStatement}; +use messages::{ + AllMessages, AvailabilityStoreMessage, FromOverseer, CandidateBackingMessage, SchedulerRoster, + OverseerSignal, RuntimeApiMessage, RuntimeApiRequest, CandidateValidationMessage, ValidationFailed, + StatementDistributionMessage, NewBackedCandidate, +}; + +#[derive(Debug, derive_more::From)] +enum Error { + NotInValidatorSet, + #[from] + ValidationFailed(ValidationFailed), + #[from] + Oneshot(oneshot::Canceled), + #[from] + Mpsc(mpsc::SendError), +} + +/// Holds all data needed for candidate backing job operation. +struct CandidateBackingJob { + /// The keystore which holds the signing keys. + keystore: KeyStorePtr, + /// The hash of the relay parent on top of which this job is doing it's work. + parent: Hash, + /// Inbound message channel receiving part. + rx_to: mpsc::Receiver, + /// Inbound message sending part. Handed out to whom it may concern. + tx_to: mpsc::Sender, + /// Outbound message channel sending part. + tx_from: mpsc::Sender, + + /// Validator set. + validators: Vec, + /// The validation codes of the `ParaId`s we are assigned as validators to. + validation_code: HashMap, + /// `HeadData`s of the parachains that this validator is assigned to. 
+ head_data: HashMap, + /// `SigningContext` to use when signing. + signing_context: SigningContext, + /// This validator's signing key. + signing_key: Arc, + /// Index of this validator. + index: ValidatorIndex, + /// The `ParaId`s assigned to this validator. + assigned_ids: HashSet, + /// We issued `Valid` statements on about these candidates. + issued_validity: HashSet, + /// `Some(h)` if this job has already issues `Seconded` statemt for some candidate with `h` hash. + seconded: Option, + /// Registered backing watchers. + backing_watchers: HashMap>>, + /// Valid votes by candidate hash. + valid_votes: HashMap>, +} + +const CHANNEL_CAPACITY: usize = 64; + +/// A message type that is sent from `CandidateBackingSubsystem` to `CandidateBackingJob`. +enum ToJob { + /// A `CandidateBackingMessage`. + CandidateBacking(CandidateBackingMessage), + /// Stop working. + Stop, +} + +/// A message type that is sent from `CandidateBackingJob` to `CandidateBackingSubsystem`. +enum FromJob { + RuntimeApiMessage(RuntimeApiMessage), + AvailabilityStoreMessage(AvailabilityStoreMessage), + CandidateValidation(CandidateValidationMessage), + StatementDistribution(StatementDistributionMessage), +} + +impl From for AllMessages { + fn from(f: FromJob) -> Self { + match f { + FromJob::RuntimeApiMessage(msg) => AllMessages::RuntimeApi(msg), + FromJob::AvailabilityStoreMessage(msg) => AllMessages::AvailabilityStore(msg), + FromJob::CandidateValidation(msg) => AllMessages::CandidateValidation(msg), + FromJob::StatementDistribution(msg) => AllMessages::StatementDistribution(msg), + } + } +} + +// finds the first key we are capable of signing with out of the given set of validators, +// if any. 
+fn signing_key(validators: &[ValidatorId], keystore: &KeyStorePtr) -> Option> { + let keystore = keystore.read(); + validators.iter() + .find_map(|v| { + keystore.key_pair::(&v).ok() + }) + .map(|pair| Arc::new(pair)) +} + +impl CandidateBackingJob { + /// Create a new `CandidateBackingJob` working on top of some parent and with a + /// keystore holding keys. + fn new(parent: Hash, keystore: KeyStorePtr) -> (Self, mpsc::Receiver) { + let (tx_to, rx_to) = mpsc::channel(CHANNEL_CAPACITY); + let (tx_from, rx_from) = mpsc::channel(CHANNEL_CAPACITY); + + ( + Self { + keystore, + parent, + rx_to, + tx_to, + tx_from, + + validators: Vec::new(), + validation_code: HashMap::default(), + head_data: HashMap::default(), + signing_key: Arc::new(Pair::from_seed(&[1; 32])), // TODO: No no no + signing_context: SigningContext::default(), + assigned_ids: HashSet::default(), + index: 0, + + issued_validity: HashSet::default(), + seconded: None, + backing_watchers: HashMap::default(), + valid_votes: HashMap::default(), + }, + rx_from, + ) + } + + /// Hand out the sending part of the channel to communicate with this job. + fn tx(&mut self) -> mpsc::Sender { + self.tx_to.clone() + } + + /// Request a validator set from the `RuntimeApi`. + async fn request_validators(&mut self) -> Result, Error> { + let (tx, rx) = oneshot::channel(); + + self.tx_from.send(FromJob::RuntimeApiMessage(RuntimeApiMessage::Request( + self.parent.clone(), + RuntimeApiRequest::Validators(tx), + ) + )).await?; + + Ok(rx.await?) + } + + /// Request the scheduler roster from `RuntimeApi`. + async fn request_validator_groups(&mut self) -> Result { + let (tx, rx) = oneshot::channel(); + + self.tx_from.send(FromJob::RuntimeApiMessage(RuntimeApiMessage::Request( + self.parent.clone(), + RuntimeApiRequest::ValidatorGroups(tx), + ) + )).await?; + + Ok(rx.await?) + } + + /// Request the validation code for some `ParaId` at given block + /// from `RuntimeApi`. 
+ async fn request_validation_code( + &mut self, + id: ParaId, + parent: BlockNumber, + parablock: Option, + ) -> Result { + let (tx, rx) = oneshot::channel(); + + self.tx_from.send(FromJob::RuntimeApiMessage(RuntimeApiMessage::Request( + self.parent.clone(), + RuntimeApiRequest::ValidationCode(id, parent, parablock, tx), + ) + )).await?; + + Ok(rx.await?) + } + + /// Request a `SigningContext` from the `RuntimeApi`. + async fn request_signing_context(&mut self) -> Result { + let (tx, rx) = oneshot::channel(); + + self.tx_from.send(FromJob::RuntimeApiMessage(RuntimeApiMessage::Request( + self.parent.clone(), + RuntimeApiRequest::SigningContext(tx), + ) + )).await?; + + Ok(rx.await?) + } + + /// Request `HeadData` for some `ParaId` from `RuntimeApi`. + async fn request_head_data(&mut self, id: ParaId) -> Result { + let (tx, rx) = oneshot::channel(); + + self.tx_from.send(FromJob::RuntimeApiMessage(RuntimeApiMessage::Request( + self.parent.clone(), + RuntimeApiRequest::HeadData(id, tx), + ) + )).await?; + + Ok(rx.await?) + } + + /// Logic that runs on startup. + async fn on_startup(&mut self) -> Result<(), Error> { + let validators = self.request_validators().await?; + + let roster = self.request_validator_groups().await?; + + self.signing_key = signing_key(&validators[..], &self.keystore).unwrap(); + self.index = validators + .iter() + .position(|x| *x == self.signing_key.as_ref().public()) + .ok_or_else(|| Error::NotInValidatorSet)? 
as u32; + + let mut our_groups = HashSet::::default(); + + self.validators = validators; + + for (i, group) in roster.validator_groups.iter().enumerate() { + if group.iter().find(|id| **id == self.index).is_some() { + our_groups.insert((i as u32).into()); + } + } + + for assignment in roster.scheduled.iter() { + if our_groups.contains(&assignment.group_idx) { + self.assigned_ids.insert(assignment.para_id); + + let validation_code = self.request_validation_code(assignment.para_id, 0, None).await?; + self.validation_code.insert(assignment.para_id, validation_code); + + let head_data = self.request_head_data(assignment.para_id).await?; + + self.head_data.insert(assignment.para_id, head_data); + } + } + + self.signing_context = self.request_signing_context().await?; + + Ok(()) + } + + /// Run asynchronously. + async fn run(mut self) -> Result<(), Error> { + self.on_startup().await?; + + while let Some(msg) = self.rx_to.next().await { + match msg { + ToJob::CandidateBacking(msg) => { + self.process_msg(msg).await?; + } + _ => break, + } + } + + Ok(()) + } + + /// Validate the candidate that is requested to be `Second`ed and distribute validation result. + async fn validate_and_second(&mut self, candidate: AbridgedCandidateReceipt) -> Result<(), Error> { + let statement = match self.spawn_validation_work(candidate.clone()).await { + Ok(()) => { + // make PoV available for later distribution. Send data to the availability + // store to keep. Sign and dispatch `valid` statement to network if we + // have not seconded the given candidate. + Statement::Seconded(candidate) + } + + // sign and dispatch `invalid` statement to network. 
+ Err(_) => Statement::Invalid(candidate.hash()), + }; + + let signed_statement = SignedFullStatement::sign( + statement, + &self.signing_context, + self.index, + &self.signing_key, + ); + + self.distribute_signed_statement(signed_statement).await?; + + Ok(()) + } + + async fn process_msg(&mut self, msg: CandidateBackingMessage) -> Result<(), Error> { + match msg { + CandidateBackingMessage::Second(_, candidate) => { + // If the message is a `CandidateBackingMessage::Second`, sign and dispatch a + // Seconded statement only if we have not seconded any other candidate and + // have not signed a Valid statement for the requested candidate. + match self.seconded { + // This job has not seconded a candidate yet. + None => { + let candidate_hash = candidate.hash(); + + if !self.issued_validity.contains(&candidate_hash) { + if let Ok(()) = self.validate_and_second(candidate).await { + self.seconded = Some(candidate_hash); + } + } + } + // This job has already seconded a candidate. + Some(_) => { + }, + } + } + CandidateBackingMessage::Statement(hash, statement) => { + let idx = statement.validator_index() as usize; + + if self.validators.len() < idx { + match statement.check_signature(&self.signing_context, &self.validators[idx]) { + Ok(()) => { + // TODO: check if the validator issuing this statent is a part of the group for the + // `ParaId` in question. + // TODO: check if the validator issuing this statement is not equivocating. + match statement.payload() { + Statement::Seconded(candidate) => { + let hash = candidate.hash(); + self.valid_votes.entry(hash).or_default().push(statement.validator_index()); + } + Statement::Valid(hash) => { + self.valid_votes.entry(*hash).or_default().push(statement.validator_index()); + } + Statement::Invalid(_) => {}, + } + + if let Some(_) = self.valid_votes.get(&hash) { + // TODO: check the quorum. 
+ // How do we go from `ParaId` in the + } + } + Err(()) => { + } + } + } + } + CandidateBackingMessage::RegisterBackingWatcher(hash, tx) => { + match self.backing_watchers.get_mut(&hash) { + Some(watchers) => watchers.push(tx), + None => { self.backing_watchers.insert(hash, vec![tx]); } + } + } + } + + Ok(()) + } + + async fn request_candidate_validation( + &mut self, + candidate: AbridgedCandidateReceipt, + pov: PoVBlock, + ) -> Result<(), Error> { + let (tx, rx) = oneshot::channel(); + + self.tx_from.send(FromJob::CandidateValidation( + CandidateValidationMessage::Validate( + self.parent, + candidate, + pov, + tx, + ) + ) + ).await?; + + rx.await??; + + Ok(()) + } + + async fn distribute_signed_statement(&mut self, s: SignedFullStatement) -> Result<(), Error> { + let smsg = StatementDistributionMessage::Share(self.parent, s); + + self.tx_from.send(FromJob::StatementDistribution(smsg)).await?; + + Ok(()) + } + + async fn spawn_validation_work( + &mut self, + candidate: AbridgedCandidateReceipt, + ) -> Result<(), Error> { + let (tx, rx) = oneshot::channel(); + + self.tx_from.send(FromJob::AvailabilityStoreMessage( + AvailabilityStoreMessage::QueryPoV( + candidate.pov_block_hash, + tx, + ) + ) + ).await?; + + let pov = rx.await?.unwrap(); + + self.request_candidate_validation(candidate, pov).await + } +} + +struct JobHandle { + abort_handle: future::AbortHandle, + to_job: mpsc::Sender, + finished: oneshot::Receiver<()>, + su_handle: usize, +} + +impl JobHandle { + async fn stop(mut self) { + let _ = self.to_job.send(ToJob::Stop).await; + let stop_timer = Delay::new(Duration::from_secs(1)); + + match future::select(stop_timer, self.finished).await { + Either::Left((_, _)) => { + }, + Either::Right((_, _)) => { + self.abort_handle.abort(); + }, + } + } + + async fn send_msg(&mut self, msg: ToJob) -> Result<(), Error> { + Ok(self.to_job.send(msg).await?) 
+ } +} + +struct Jobs { + spawner: S, + running: HashMap, + outgoing_msgs: StreamUnordered>, +} + +impl Jobs { + fn new(spawner: S) -> Self { + Self { + spawner, + running: HashMap::default(), + outgoing_msgs: StreamUnordered::new(), + } + } + + fn spawn_job(&mut self, parent_hash: Hash, keystore: KeyStorePtr) -> Result<(), ()> { + let (mut job, from_job) = CandidateBackingJob::new(parent_hash,keystore); + + let to_job = job.tx(); + + let (future, abort_handle) = future::abortable(async move { + if let Err(_) = job.run().await { + log::error!("Job returned an error"); + } + }); + let (finished_tx, finished) = oneshot::channel(); + + let future = async move { + let _ = future.await; + let _ = finished_tx.send(()); + }; + self.spawner.spawn(future).map_err(|_| ())?; + + let su_handle = self.outgoing_msgs.push(from_job); + + let handle = JobHandle { + abort_handle, + to_job, + finished, + su_handle, + }; + + self.running.insert(parent_hash, handle); + + Ok(()) + } + + async fn stop_job(&mut self, parent_hash: Hash) -> Result<(), ()> { + match self.running.remove(&parent_hash) { + Some(handle) => { + Pin::new(&mut self.outgoing_msgs).remove(handle.su_handle); + handle.stop().await; + Ok(()) + } + None => Err(()) + } + } + + async fn send_msg(&mut self, parent_hash: Hash, msg: ToJob) { + if let Some(job) = self.running.get_mut(&parent_hash) { + let _ = job.send_msg(msg).await; + } + } + + async fn next(&mut self) -> Option { + self.outgoing_msgs.next().await.and_then(|(e, _)| match e { + StreamYield::Item(e) => Some(e), + _ => None, + }) + } +} + +pub struct CandidateBackingSubsystem { + spawner: S, + keystore: KeyStorePtr, +} + +impl CandidateBackingSubsystem { + pub fn new(keystore: KeyStorePtr, spawner: S) -> Self { + Self { + spawner, + keystore, + } + } + + async fn run( + mut ctx: SubsystemContext, + keystore: KeyStorePtr, + spawner: S, + ) { + let mut jobs = Jobs::new(spawner.clone()); + + loop { + select! 
{ + incoming = ctx.recv().fuse() => { + match incoming { + Ok(msg) => match msg { + FromOverseer::Signal(OverseerSignal::StartWork(hash)) => { + let _ = jobs.spawn_job(hash, keystore.clone()); + } + FromOverseer::Signal(OverseerSignal::StopWork(hash)) => { + let _ = jobs.stop_job(hash); + } + FromOverseer::Communication { msg } => { + match msg { + CandidateBackingMessage::Second(hash, _) => { + let _ = jobs.send_msg(hash.clone(), ToJob::CandidateBacking(msg)).await; + } + _ => (), + } + } + _ => (), + }, + Err(_) => break, + } + } + outgoing = jobs.next().fuse() => { + match outgoing { + Some(msg) => { + let _ = ctx.send_msg(msg.into()).await; + } + None => break, + } + } + complete => break, + } + } + } +} + +impl Subsystem for CandidateBackingSubsystem { + fn start(&mut self, ctx: SubsystemContext) -> SpawnedSubsystem { + let keystore = self.keystore.clone(); + let spawner = self.spawner.clone(); + + SpawnedSubsystem(Box::pin(async move { + Self::run(ctx, keystore, spawner).await; + })) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use futures::{channel::mpsc, executor, pin_mut, StreamExt}; + use sp_keyring::sr25519::Keyring; + use primitives::Pair; + use polkadot_primitives::parachain::{ + AssignmentKind, CoreAssignment, BlockData, CoreIndex, + }; + + #[test] + fn backing_works() { + let spawner = executor::ThreadPool::new().unwrap(); + let keystore = keystore::Store::new_in_memory(); + + let alice_key = Keyring::Alice.pair(); + let alice_id: ValidatorId = alice_key.public().into(); + let bob_key = Keyring::Bob.pair(); + let bob_id: ValidatorId = bob_key.public().into(); + + // Make sure `Alice` key is in the keystore, so this mocked node will be a parachain validator. 
+ keystore.write().insert_ephemeral_from_seed::(&Keyring::Alice.to_seed()) + .expect("Insert key into keystore"); + + executor::block_on(async move { + let parent_hash_1 = [1; 32].into(); + let mut cbs = CandidateBackingSubsystem::new(keystore, spawner.clone()); + let (mut tx, rx) = mpsc::channel(64); + let (ctx, outgoing) = SubsystemContext::::new_testing(rx); + + let roster = SchedulerRoster { + validator_groups: vec![vec![0], vec![1]], + scheduled: vec![ + CoreAssignment { + core: CoreIndex(0), + para_id: 0u32.into(), + kind: AssignmentKind::Parachain, + group_idx: GroupIndex(0), + } + ], + upcoming: vec![], + availability_cores: vec![], + }; + let validation_code = ValidationCode(vec![1, 2, 3]); + let head_data = HeadData(vec![4, 5, 6]); + let signing_context = SigningContext { + session_index: 1, + parent_hash: parent_hash_1, + }; + + let spawned = cbs.start(ctx).0; + + spawner.spawn(spawned).unwrap(); + pin_mut!(outgoing); + + // Start work on some new parent. + tx.send(FromOverseer::Signal(OverseerSignal::StartWork(parent_hash_1))).await.unwrap(); + + // Check that subsystem job issues a request for a validator set. + match outgoing.next().await.unwrap() { + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::Validators(tx)) + ) if parent == parent_hash_1 => { + tx.send(vec![alice_id.clone(), bob_id]).unwrap(); + } + msg => panic!("unexpected message {:?}", msg), + } + + // Check that subsystem job issues a request for the validator groups. + match outgoing.next().await.unwrap() { + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::ValidatorGroups(tx)) + ) if parent == parent_hash_1 => { + tx.send(roster).unwrap(); + } + msg => panic!("unexpected message {:?}", msg), + } + + // Check that subsystem job issues a request for the validation code. 
+ match outgoing.next().await.unwrap() { + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::ValidationCode(_, _, _, tx)) + ) if parent == parent_hash_1 => { + tx.send(validation_code.clone()).unwrap(); + } + msg => panic!("unexpected message {:?}", msg), + } + + // Check that subsystem job issues a request for the head data. + match outgoing.next().await.unwrap() { + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::HeadData(_, tx)) + ) if parent == parent_hash_1 => { + tx.send(head_data.clone()).unwrap(); + } + msg => panic!("unexpected message {:?}", msg), + } + + // Check that subsystem job issues a request for the signing context. + match outgoing.next().await.unwrap() { + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::SigningContext(tx)) + ) if parent == parent_hash_1 => { + tx.send(signing_context.clone()).unwrap(); + } + msg => panic!("unexpected message {:?}", msg), + } + + let mut candidate = AbridgedCandidateReceipt::default(); + candidate.parachain_index = 0.into(); + candidate.relay_parent = parent_hash_1; + candidate.pov_block_hash = [2; 32].into(); + + let second = CandidateBackingMessage::Second(parent_hash_1, candidate.clone()); + + tx.send(FromOverseer::Communication{ msg: second }).await.unwrap(); + + let pov_block = PoVBlock { + block_data: BlockData(vec![42, 43, 44]), + }; + + match outgoing.next().await.unwrap() { + AllMessages::AvailabilityStore( + AvailabilityStoreMessage::QueryPoV( + pov_hash, + tx, + ) + ) if pov_hash == [2; 32].into() => { + tx.send(Some(pov_block.clone())).unwrap(); + } + msg => panic!("unexpected msg {:?}", msg), + } + + match outgoing.next().await.unwrap() { + AllMessages::CandidateValidation( + CandidateValidationMessage::Validate( + parent_hash, + c, + pov, + tx, + ) + ) if parent_hash == parent_hash_1 && pov == pov_block && c == candidate => { + tx.send(Ok(())).unwrap(); + } + msg => panic!("unexpected msg {:?}", msg), + 
} + + match outgoing.next().await.unwrap() { + AllMessages::StatementDistribution( + StatementDistributionMessage::Share( + parent_hash, + signed_statement, + ) + ) if parent_hash == parent_hash_1 => { + signed_statement.check_signature(&signing_context, &alice_id).unwrap(); + } + msg => panic!("unexpected message {:?}", msg), + } + + tx.send(FromOverseer::Signal(OverseerSignal::StopWork(parent_hash_1))).await.unwrap(); + }); + } +} diff --git a/node/messages/src/lib.rs b/node/messages/src/lib.rs index 68eb3a4c4b99..947c3b8e45b6 100644 --- a/node/messages/src/lib.rs +++ b/node/messages/src/lib.rs @@ -29,7 +29,7 @@ use polkadot_primitives::{BlockNumber, Hash, Signature}; use polkadot_primitives::parachain::{ AbridgedCandidateReceipt, PoVBlock, ErasureChunk, BackedCandidate, Id as ParaId, SignedAvailabilityBitfield, SigningContext, ValidatorId, ValidationCode, ValidatorIndex, - CoreAssignment, CoreOccupied, + CoreAssignment, CoreOccupied, HeadData, }; use polkadot_node_primitives::{ MisbehaviorReport, SignedFullStatement, @@ -148,6 +148,7 @@ pub enum BitfieldDistributionMessage { } /// Availability store subsystem message. +#[derive(Debug)] pub enum AvailabilityStoreMessage { /// Query a `PoVBlock` from the AV store. QueryPoV(Hash, oneshot::Sender>), @@ -160,6 +161,7 @@ pub enum AvailabilityStoreMessage { } /// The information on scheduler assignments that some somesystems may be querying. +#[derive(Debug)] pub struct SchedulerRoster { /// Validator-to-groups assignments. pub validator_groups: Vec>, @@ -172,6 +174,7 @@ pub struct SchedulerRoster { } /// A request to the Runtime API subsystem. +#[derive(Debug)] pub enum RuntimeApiRequest { /// Get the current validator set. Validators(oneshot::Sender>), @@ -183,15 +186,19 @@ pub enum RuntimeApiRequest { /// an optional block number representing an intermediate parablock executed in the context of /// that block. ValidationCode(ParaId, BlockNumber, Option, oneshot::Sender), + /// Get head data for a specific para. 
+ HeadData(ParaId, oneshot::Sender), } /// A message to the Runtime API subsystem. +#[derive(Debug)] pub enum RuntimeApiMessage { /// Make a request of the runtime API against the post-state of the given relay-parent. Request(Hash, RuntimeApiRequest), } /// Statement distribution message. +#[derive(Debug)] pub enum StatementDistributionMessage { /// We have originated a signed statement in the context of /// given relay-parent hash and it should be distributed to other validators. @@ -228,6 +235,12 @@ pub enum AllMessages { CandidateValidation(CandidateValidationMessage), /// Message for the candidate backing subsystem. CandidateBacking(CandidateBackingMessage), + /// Message for Runtime API subsystem. + RuntimeApi(RuntimeApiMessage), + /// Message for the Availability Store subsystem. + AvailabilityStore(AvailabilityStoreMessage), + /// Message for the Statement Distribution subsystem. + StatementDistribution(StatementDistributionMessage), } /// A message type that a subsystem receives from an overseer. diff --git a/node/overseer/src/lib.rs b/node/overseer/src/lib.rs index 0d3c9b7b5095..f23a4f5f197c 100644 --- a/node/overseer/src/lib.rs +++ b/node/overseer/src/lib.rs @@ -336,6 +336,25 @@ impl SubsystemContext { tx, } } + + pub fn new_testing( + rx: mpsc::Receiver>, + ) -> (Self, impl futures::Stream) { + let (tx, sender) = mpsc::channel(64); + + ( + Self { + rx, + tx, + }, + sender.filter_map(|x| async move { + match x { + ToOverseer::SubsystemMessage(msg) => Some(msg), + _ => None, + } + }), + ) + } } /// A trait that describes the [`Subsystem`]s that can run on the [`Overseer`]. 
@@ -680,6 +699,9 @@ where let _ = s.tx.send(FromOverseer::Communication { msg }).await; } } + AllMessages::RuntimeApi(_) => unimplemented!(), + AllMessages::AvailabilityStore(_) => unimplemented!(), + AllMessages::StatementDistribution(_) => unimplemented!(), } } diff --git a/primitives/src/parachain.rs b/primitives/src/parachain.rs index 7055f8590fc8..1f22e6371882 100644 --- a/primitives/src/parachain.rs +++ b/primitives/src/parachain.rs @@ -172,7 +172,7 @@ pub struct DutyRoster { /// The unique (during session) index of a core. #[derive(Encode, Decode, Default, PartialOrd, Ord, Eq, PartialEq, Clone, Copy)] -#[cfg_attr(test, derive(Debug))] +#[cfg_attr(feature = "std", derive(Debug))] pub struct CoreIndex(pub u32); impl From for CoreIndex { @@ -183,7 +183,7 @@ impl From for CoreIndex { /// The unique (during session) index of a validator group. #[derive(Encode, Decode, Default, Clone, Copy)] -#[cfg_attr(test, derive(PartialEq, Debug))] +#[cfg_attr(feature = "std", derive(Eq, Hash, PartialEq, Debug))] pub struct GroupIndex(pub u32); impl From for GroupIndex { @@ -194,12 +194,12 @@ impl From for GroupIndex { /// A claim on authoring the next block for a given parathread. #[derive(Clone, Encode, Decode, Default)] -#[cfg_attr(test, derive(PartialEq, Debug))] +#[cfg_attr(feature = "std", derive(PartialEq, Debug))] pub struct ParathreadClaim(pub Id, pub CollatorId); /// An entry tracking a claim to ensure it does not pass the maximum number of retries. #[derive(Clone, Encode, Decode, Default)] -#[cfg_attr(test, derive(PartialEq, Debug))] +#[cfg_attr(feature = "std", derive(PartialEq, Debug))] pub struct ParathreadEntry { /// The claim. pub claim: ParathreadClaim, @@ -209,7 +209,7 @@ pub struct ParathreadEntry { /// What is occupying a specific availability core. #[derive(Clone, Encode, Decode)] -#[cfg_attr(test, derive(PartialEq, Debug))] +#[cfg_attr(feature = "std", derive(PartialEq, Debug))] pub enum CoreOccupied { /// A parathread. 
Parathread(ParathreadEntry), @@ -219,7 +219,7 @@ pub enum CoreOccupied { /// The assignment type. #[derive(Clone, Encode, Decode)] -#[cfg_attr(test, derive(PartialEq, Debug))] +#[cfg_attr(feature = "std", derive(PartialEq, Debug))] pub enum AssignmentKind { /// A parachain. Parachain, @@ -229,7 +229,7 @@ pub enum AssignmentKind { /// How a free core is scheduled to be assigned. #[derive(Clone, Encode, Decode)] -#[cfg_attr(test, derive(PartialEq, Debug))] +#[cfg_attr(feature = "std", derive(PartialEq, Debug))] pub struct CoreAssignment { /// The core that is assigned. pub core: CoreIndex, diff --git a/runtime/parachains/src/inclusion.rs b/runtime/parachains/src/inclusion.rs index 6f067d42f42f..2bb63d148fda 100644 --- a/runtime/parachains/src/inclusion.rs +++ b/runtime/parachains/src/inclusion.rs @@ -498,7 +498,7 @@ mod tests { use primitives::{BlockNumber, Hash}; use primitives::parachain::{ SignedAvailabilityBitfield, CompactStatement as Statement, ValidityAttestation, CollatorId, - CandidateCommitments, SignedStatement, + CandidateCommitments, SignedStatement, AssignmentKind, }; use frame_support::traits::{OnFinalize, OnInitialize}; use keyring::Sr25519Keyring; @@ -510,7 +510,6 @@ mod tests { use crate::initializer::SessionChangeNotification; use crate::configuration::HostConfiguration; use crate::paras::ParaGenesisArgs; - use crate::scheduler::AssignmentKind; fn default_config() -> HostConfiguration { let mut config = HostConfiguration::default(); diff --git a/runtime/parachains/src/scheduler.rs b/runtime/parachains/src/scheduler.rs index 7934f3618b08..7ded85f7a210 100644 --- a/runtime/parachains/src/scheduler.rs +++ b/runtime/parachains/src/scheduler.rs @@ -586,7 +586,7 @@ impl Module { mod tests { use super::*; - use primitives::{BlockNumber, parachain::ValidatorId}; + use primitives::{BlockNumber, parachain::{CollatorId, ValidatorId}}; use frame_support::traits::{OnFinalize, OnInitialize}; use keyring::Sr25519Keyring; diff --git 
a/validation/src/shared_table/mod.rs b/validation/src/shared_table/mod.rs index fd6702a7e640..4cde687c22f0 100644 --- a/validation/src/shared_table/mod.rs +++ b/validation/src/shared_table/mod.rs @@ -706,6 +706,7 @@ mod tests { &validity_other_key.into(), &signing_context, ); + let signed_statement = ::table::generic::SignedStatement { statement: candidate_statement, signature: signature.into(), From 81220d8d9989fd8f7fb1ea641675ce3de649769e Mon Sep 17 00:00:00 2001 From: Fedor Sakharov Date: Wed, 1 Jul 2020 22:34:28 +0300 Subject: [PATCH 04/18] More functionality --- Cargo.lock | 2 + node/core/backing/Cargo.toml | 2 + node/core/backing/src/lib.rs | 483 ++++++++++++++++++++++----------- statement-table/src/generic.rs | 11 + 4 files changed, 346 insertions(+), 152 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 954c718d0631..bc6a8e8cfc4b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -228,6 +228,7 @@ checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" name = "backing" version = "0.1.0" dependencies = [ + "bitvec", "derive_more 0.99.9", "futures 0.3.5", "futures-timer 3.0.2", @@ -236,6 +237,7 @@ dependencies = [ "polkadot-node-primitives", "polkadot-overseer", "polkadot-primitives", + "polkadot-statement-table", "sc-client-api", "sc-keystore", "sp-api", diff --git a/node/core/backing/Cargo.toml b/node/core/backing/Cargo.toml index a04a602fc3b0..29a52c14370f 100644 --- a/node/core/backing/Cargo.toml +++ b/node/core/backing/Cargo.toml @@ -15,11 +15,13 @@ primitives = { package = "sp-core", git = "https://github.com/paritytech/substra polkadot-primitives = { path = "../../../primitives" } polkadot-node-primitives = { path = "../../primitives" } +statement-table = { package = "polkadot-statement-table", path = "../../../statement-table" } polkadot-overseer = { path = "../../overseer" } messages = { package = "polkadot-node-messages", path = "../../messages" } futures-timer = "3.0.2" streamunordered = "0.5.1" derive_more = "0.99.9" +bitvec = 
{ version = "0.17.4", default-features = false, features = ["alloc"] } [dev-dependencies] sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 4123cb86db3f..6e7df47d52cf 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -23,6 +23,7 @@ use std::pin::Pin; use std::sync::Arc; use std::time::Duration; +use bitvec::vec::BitVec; use log; use futures::{ select, FutureExt, SinkExt, StreamExt, @@ -38,7 +39,7 @@ use keystore::KeyStorePtr; use polkadot_primitives::{ Hash, BlockNumber, parachain::{ - AbridgedCandidateReceipt, Id as ParaId, ValidatorPair, ValidatorId, ValidatorIndex, + AbridgedCandidateReceipt, BackedCandidate, Id as ParaId, ValidatorPair, ValidatorId, ValidatorIndex, HeadData, ValidationCode, SigningContext, PoVBlock, GroupIndex, }, }; @@ -49,10 +50,14 @@ use messages::{ OverseerSignal, RuntimeApiMessage, RuntimeApiRequest, CandidateValidationMessage, ValidationFailed, StatementDistributionMessage, NewBackedCandidate, }; +use statement_table::{ + generic::{AttestedCandidate as TableAttestedCandidate}, + self, Table, Context as TableContextTrait, + Statement as TableStatement, SignedStatement as TableSignedStatement, +}; #[derive(Debug, derive_more::From)] enum Error { - NotInValidatorSet, #[from] ValidationFailed(ValidationFailed), #[from] @@ -74,28 +79,68 @@ struct CandidateBackingJob { /// Outbound message channel sending part. tx_from: mpsc::Sender, - /// Validator set. - validators: Vec, /// The validation codes of the `ParaId`s we are assigned as validators to. - validation_code: HashMap, + validation_code: ValidationCode, /// `HeadData`s of the parachains that this validator is assigned to. - head_data: HashMap, - /// `SigningContext` to use when signing. - signing_context: SigningContext, - /// This validator's signing key. - signing_key: Arc, - /// Index of this validator. 
- index: ValidatorIndex, + head_data: HeadData, /// The `ParaId`s assigned to this validator. - assigned_ids: HashSet, + assignment: ParaId, /// We issued `Valid` statements on about these candidates. issued_validity: HashSet, + /// We sent `NewBackedCandidate` on these candidates. + backed_candidates: HashSet, /// `Some(h)` if this job has already issues `Seconded` statemt for some candidate with `h` hash. seconded: Option, /// Registered backing watchers. - backing_watchers: HashMap>>, - /// Valid votes by candidate hash. - valid_votes: HashMap>, + backing_watchers: Vec>, + + table: Table, + table_context: TableContext, +} + +const fn needed_votes(n_validators: usize) -> usize { + let mut threshold = (n_validators * 2) / 3; + threshold += (n_validators * 2) % 3; + threshold +} + +#[derive(Default)] +struct TableContext { + signing_context: SigningContext, + key: Option>, + groups: HashMap>, + validators: Vec, +} + +impl TableContextTrait for TableContext { + fn is_member_of(&self, authority: ValidatorIndex, group: &ParaId) -> bool { + let key = match self.validators.get(authority as usize) { + Some(val) => val, + None => return false, + }; + + self.groups.get(group).map_or(false, |g| g.get(&key).is_some()) + } + + fn requisite_votes(&self, group: &ParaId) -> usize { + self.groups.get(group).map_or(usize::max_value(), |g| needed_votes(g.len())) + } +} + +impl TableContext { + fn local_id(&self) -> Option { + self.key.as_ref().map(|k| k.public()) + } + + fn local_index(&self) -> Option { + self.local_id().and_then(|id| + self.validators + .iter() + .enumerate() + .find(|(_, k)| k == &&id) + .map(|(i, _)| i as ValidatorIndex) + ) + } } const CHANNEL_CAPACITY: usize = 64; @@ -127,6 +172,22 @@ impl From for AllMessages { } } +// It looks like it's not possible to do an `impl From` given the current state of +// the code. So this does the necessary conversion. 
+fn primitive_statement_to_table(s: &SignedFullStatement) -> TableSignedStatement { + let statement = match s.payload() { + Statement::Seconded(c) => TableStatement::Candidate(c.clone()), + Statement::Valid(h) => TableStatement::Valid(h.clone()), + Statement::Invalid(h) => TableStatement::Invalid(h.clone()), + }; + + TableSignedStatement { + statement, + signature: s.signature().clone(), + sender: s.validator_index(), + } +} + // finds the first key we are capable of signing with out of the given set of validators, // if any. fn signing_key(validators: &[ValidatorId], keystore: &KeyStorePtr) -> Option> { @@ -153,18 +214,17 @@ impl CandidateBackingJob { tx_to, tx_from, - validators: Vec::new(), - validation_code: HashMap::default(), - head_data: HashMap::default(), - signing_key: Arc::new(Pair::from_seed(&[1; 32])), // TODO: No no no - signing_context: SigningContext::default(), - assigned_ids: HashSet::default(), - index: 0, + validation_code: ValidationCode::default(), + head_data: HeadData::default(), + assignment: ParaId::default(), issued_validity: HashSet::default(), + backed_candidates: HashSet::default(), seconded: None, - backing_watchers: HashMap::default(), - valid_votes: HashMap::default(), + backing_watchers: Vec::new(), + + table: Table::default(), + table_context: TableContext::default(), }, rx_from, ) @@ -252,36 +312,30 @@ impl CandidateBackingJob { let roster = self.request_validator_groups().await?; - self.signing_key = signing_key(&validators[..], &self.keystore).unwrap(); - self.index = validators - .iter() - .position(|x| *x == self.signing_key.as_ref().public()) - .ok_or_else(|| Error::NotInValidatorSet)? 
as u32; + self.table_context.key = signing_key(&validators[..], &self.keystore); - let mut our_groups = HashSet::::default(); - - self.validators = validators; - - for (i, group) in roster.validator_groups.iter().enumerate() { - if group.iter().find(|id| **id == self.index).is_some() { - our_groups.insert((i as u32).into()); - } + for assignment in roster.scheduled { + self.table_context.groups.insert( + assignment.para_id, + roster.validator_groups[assignment.group_idx.0 as usize] + .iter() + .map(|idx| validators[*idx as usize].clone()) + .collect() + ); } - for assignment in roster.scheduled.iter() { - if our_groups.contains(&assignment.group_idx) { - self.assigned_ids.insert(assignment.para_id); - - let validation_code = self.request_validation_code(assignment.para_id, 0, None).await?; - self.validation_code.insert(assignment.para_id, validation_code); - - let head_data = self.request_head_data(assignment.para_id).await?; - - self.head_data.insert(assignment.para_id, head_data); + if let Some(ref key) = self.table_context.key { + for (para_id, group) in self.table_context.groups.iter() { + if group.contains(&key.as_ref().public()) { + self.assignment = *para_id; + } } } - self.signing_context = self.request_signing_context().await?; + self.validation_code = self.request_validation_code(self.assignment, 0, None).await?; + self.head_data = self.request_head_data(self.assignment).await?; + self.table_context.signing_context = self.request_signing_context().await?; + self.table_context.validators = validators; Ok(()) } @@ -316,14 +370,75 @@ impl CandidateBackingJob { Err(_) => Statement::Invalid(candidate.hash()), }; - let signed_statement = SignedFullStatement::sign( - statement, - &self.signing_context, - self.index, - &self.signing_key, - ); + if let Some(ref signing_key) = self.table_context.key { + if let Some(local_index) = self.table_context.local_index() { + let signed_statement = SignedFullStatement::sign( + statement, + 
&self.table_context.signing_context, + local_index, + signing_key, + ); - self.distribute_signed_statement(signed_statement).await?; + self.distribute_signed_statement(signed_statement).await?; + } + } + + Ok(()) + } + + async fn import_statement(&mut self, statement: SignedFullStatement) -> Result<(), Error> { + let idx = statement.validator_index() as usize; + + if self.table_context.validators.len() > idx { + match statement.check_signature( + &self.table_context.signing_context, + &self.table_context.validators[idx], + ) { + Ok(()) => { + let stmt = primitive_statement_to_table(&statement); + + self.table.import_statement(&self.table_context, stmt); + + let proposed = self.table.proposed_candidates(&self.table_context); + + for p in proposed.into_iter() { + if !self.backed_candidates.contains(&p.candidate.hash()) { + let TableAttestedCandidate { candidate, validity_votes, .. } = p; + + let (ids, validity_votes): (Vec<_>, Vec<_>) = validity_votes + .into_iter() + .map(|(id, vote)| (id, vote.into())) + .unzip(); + + self.backed_candidates.insert(candidate.hash()); + + let mut validator_indices = BitVec::with_capacity( + self.table_context.validators.len(), + ); + + validator_indices.resize(self.table_context.validators.len(), false); + + for id in ids.iter() { + validator_indices.set(*id as usize, true); + } + + let backed = BackedCandidate { + candidate, + validity_votes, + validator_indices, + }; + + for watcher in self.backing_watchers.iter_mut() { + let _ = watcher.send(NewBackedCandidate(backed.clone())).await; + } + } + } + } + Err(()) => { + return Err(Error::ValidationFailed(ValidationFailed)); + } + } + }; Ok(()) } @@ -350,41 +465,11 @@ impl CandidateBackingJob { }, } } - CandidateBackingMessage::Statement(hash, statement) => { - let idx = statement.validator_index() as usize; - - if self.validators.len() < idx { - match statement.check_signature(&self.signing_context, &self.validators[idx]) { - Ok(()) => { - // TODO: check if the validator issuing this 
statent is a part of the group for the - // `ParaId` in question. - // TODO: check if the validator issuing this statement is not equivocating. - match statement.payload() { - Statement::Seconded(candidate) => { - let hash = candidate.hash(); - self.valid_votes.entry(hash).or_default().push(statement.validator_index()); - } - Statement::Valid(hash) => { - self.valid_votes.entry(*hash).or_default().push(statement.validator_index()); - } - Statement::Invalid(_) => {}, - } - - if let Some(_) = self.valid_votes.get(&hash) { - // TODO: check the quorum. - // How do we go from `ParaId` in the - } - } - Err(()) => { - } - } - } + CandidateBackingMessage::Statement(_, statement) => { + let _ = self.import_statement(statement).await; } - CandidateBackingMessage::RegisterBackingWatcher(hash, tx) => { - match self.backing_watchers.get_mut(&hash) { - Some(watchers) => watchers.push(tx), - None => { self.backing_watchers.insert(hash, vec![tx]); } - } + CandidateBackingMessage::RegisterBackingWatcher(_, tx) => { + self.backing_watchers.push(tx); } } @@ -572,7 +657,9 @@ impl CandidateBackingSubsystem { } FromOverseer::Communication { msg } => { match msg { - CandidateBackingMessage::Second(hash, _) => { + CandidateBackingMessage::Second(hash, _) | + CandidateBackingMessage::Statement(hash, _) | + CandidateBackingMessage::RegisterBackingWatcher(hash, _) => { let _ = jobs.send_msg(hash.clone(), ToJob::CandidateBacking(msg)).await; } _ => (), @@ -612,25 +699,61 @@ impl Subsystem for C mod tests { use super::*; use futures::{channel::mpsc, executor, pin_mut, StreamExt}; - use sp_keyring::sr25519::Keyring; - use primitives::Pair; + use sp_keyring::Sr25519Keyring; use polkadot_primitives::parachain::{ - AssignmentKind, CoreAssignment, BlockData, CoreIndex, + AssignmentKind, CollatorId, CoreAssignment, BlockData, CoreIndex, ValidityAttestation, }; + fn validator_pubkeys(val_ids: &[Sr25519Keyring]) -> Vec { + val_ids.iter().map(|v| v.public().into()).collect() + } + #[test] fn 
backing_works() { - let spawner = executor::ThreadPool::new().unwrap(); - let keystore = keystore::Store::new_in_memory(); + let chain_a = ParaId::from(1); + let chain_b = ParaId::from(2); + let thread_a = ParaId::from(3); - let alice_key = Keyring::Alice.pair(); - let alice_id: ValidatorId = alice_key.public().into(); - let bob_key = Keyring::Bob.pair(); - let bob_id: ValidatorId = bob_key.public().into(); + let validators = vec![ + Sr25519Keyring::Alice, + Sr25519Keyring::Bob, + Sr25519Keyring::Charlie, + Sr25519Keyring::Dave, + Sr25519Keyring::Ferdie, + ]; + let validator_public = validator_pubkeys(&validators); + + let keystore = keystore::Store::new_in_memory(); // Make sure `Alice` key is in the keystore, so this mocked node will be a parachain validator. - keystore.write().insert_ephemeral_from_seed::(&Keyring::Alice.to_seed()) + keystore.write().insert_ephemeral_from_seed::(&validators[0].to_seed()) .expect("Insert key into keystore"); + let spawner = executor::ThreadPool::new().unwrap(); + + let chain_a_assignment = CoreAssignment { + core: CoreIndex::from(0), + para_id: chain_a, + kind: AssignmentKind::Parachain, + group_idx: GroupIndex::from(0), + }; + + let chain_b_assignment = CoreAssignment { + core: CoreIndex::from(1), + para_id: chain_b, + kind: AssignmentKind::Parachain, + group_idx: GroupIndex::from(1), + }; + + let thread_collator: CollatorId = Sr25519Keyring::Two.public().into(); + + let thread_a_assignment = CoreAssignment { + core: CoreIndex::from(2), + para_id: thread_a, + kind: AssignmentKind::Parathread(thread_collator.clone(), 0), + group_idx: GroupIndex::from(2), + }; + + let validator_groups = vec![vec![0, 2], vec![1, 3], vec![4]]; executor::block_on(async move { let parent_hash_1 = [1; 32].into(); @@ -639,14 +762,11 @@ mod tests { let (ctx, outgoing) = SubsystemContext::::new_testing(rx); let roster = SchedulerRoster { - validator_groups: vec![vec![0], vec![1]], + validator_groups, scheduled: vec![ - CoreAssignment { - core: 
CoreIndex(0), - para_id: 0u32.into(), - kind: AssignmentKind::Parachain, - group_idx: GroupIndex(0), - } + chain_a_assignment, + chain_b_assignment, + thread_a_assignment, ], upcoming: vec![], availability_cores: vec![], @@ -671,7 +791,7 @@ mod tests { AllMessages::RuntimeApi( RuntimeApiMessage::Request(parent, RuntimeApiRequest::Validators(tx)) ) if parent == parent_hash_1 => { - tx.send(vec![alice_id.clone(), bob_id]).unwrap(); + tx.send(validator_public.clone()).unwrap(); } msg => panic!("unexpected message {:?}", msg), } @@ -716,55 +836,114 @@ mod tests { msg => panic!("unexpected message {:?}", msg), } - let mut candidate = AbridgedCandidateReceipt::default(); - candidate.parachain_index = 0.into(); - candidate.relay_parent = parent_hash_1; - candidate.pov_block_hash = [2; 32].into(); - - let second = CandidateBackingMessage::Second(parent_hash_1, candidate.clone()); - - tx.send(FromOverseer::Communication{ msg: second }).await.unwrap(); - - let pov_block = PoVBlock { - block_data: BlockData(vec![42, 43, 44]), - }; + // Check that `Second` message issues all necessary requests. 
+ { + let candidate = AbridgedCandidateReceipt { + parachain_index: chain_a, + relay_parent: parent_hash_1, + pov_block_hash: [2; 32].into(), + ..Default::default() + }; + + let second = CandidateBackingMessage::Second(parent_hash_1, candidate.clone()); + + tx.send(FromOverseer::Communication{ msg: second }).await.unwrap(); + + let pov_block = PoVBlock { + block_data: BlockData(vec![42, 43, 44]), + }; + + match outgoing.next().await.unwrap() { + AllMessages::AvailabilityStore( + AvailabilityStoreMessage::QueryPoV( + pov_hash, + tx, + ) + ) if pov_hash == [2; 32].into() => { + tx.send(Some(pov_block.clone())).unwrap(); + } + msg => panic!("unexpected msg {:?}", msg), + } - match outgoing.next().await.unwrap() { - AllMessages::AvailabilityStore( - AvailabilityStoreMessage::QueryPoV( - pov_hash, - tx, - ) - ) if pov_hash == [2; 32].into() => { - tx.send(Some(pov_block.clone())).unwrap(); + match outgoing.next().await.unwrap() { + AllMessages::CandidateValidation( + CandidateValidationMessage::Validate( + parent_hash, + c, + pov, + tx, + ) + ) if parent_hash == parent_hash_1 && pov == pov_block && c == candidate => { + tx.send(Ok(())).unwrap(); + } + msg => panic!("unexpected msg {:?}", msg), } - msg => panic!("unexpected msg {:?}", msg), - } - match outgoing.next().await.unwrap() { - AllMessages::CandidateValidation( - CandidateValidationMessage::Validate( - parent_hash, - c, - pov, - tx, - ) - ) if parent_hash == parent_hash_1 && pov == pov_block && c == candidate => { - tx.send(Ok(())).unwrap(); + match outgoing.next().await.unwrap() { + AllMessages::StatementDistribution( + StatementDistributionMessage::Share( + parent_hash, + signed_statement, + ) + ) if parent_hash == parent_hash_1 => { + signed_statement.check_signature(&signing_context, &validator_public[0]).unwrap(); + } + msg => panic!("unexpected message {:?}", msg), } - msg => panic!("unexpected msg {:?}", msg), } - match outgoing.next().await.unwrap() { - AllMessages::StatementDistribution( - 
StatementDistributionMessage::Share( - parent_hash, - signed_statement, - ) - ) if parent_hash == parent_hash_1 => { - signed_statement.check_signature(&signing_context, &alice_id).unwrap(); - } - msg => panic!("unexpected message {:?}", msg), + // Check that reaching a required quorum on a candidate issues a correct message + // to the `BackingWatcher`. + { + let (backed_tx, mut backed_rx) = mpsc::channel(64); + + let register_watcher = CandidateBackingMessage::RegisterBackingWatcher(parent_hash_1, backed_tx); + + tx.send(FromOverseer::Communication{ msg: register_watcher}).await.unwrap(); + + let candidate_a = AbridgedCandidateReceipt { + parachain_index: chain_a, + relay_parent: parent_hash_1, + pov_block_hash: Hash::from([5; 32]), + ..Default::default() + }; + let candidate_a_hash = candidate_a.hash(); + + let signed_a = SignedFullStatement::sign( + Statement::Seconded(candidate_a.clone()), + &signing_context, + 2, + &validators[2].pair().into(), + ); + + let signed_b = SignedFullStatement::sign( + Statement::Valid(candidate_a_hash), + &signing_context, + 0, + &validators[0].pair().into(), + ); + + let statement = CandidateBackingMessage::Statement(parent_hash_1, signed_a.clone()); + + tx.send(FromOverseer::Communication{ msg: statement }).await.unwrap(); + + let statement = CandidateBackingMessage::Statement(parent_hash_1, signed_b.clone()); + + tx.send(FromOverseer::Communication{ msg: statement }).await.unwrap(); + + let backed = backed_rx.next().await.unwrap(); + + // `validity_votes` may be in any order so we can't do this in a single assert. 
+ assert_eq!(backed.0.candidate, candidate_a); + assert_eq!(backed.0.validity_votes.len(), 2); + assert!(backed.0.validity_votes.contains( + &ValidityAttestation::Explicit(signed_b.signature().clone()) + )); + assert!(backed.0.validity_votes.contains( + &ValidityAttestation::Implicit(signed_a.signature().clone()) + )); + + assert_eq!(backed.0.validator_indices, bitvec::bitvec![Lsb0, u8; 1, 0, 1, 0, 0]); } tx.send(FromOverseer::Signal(OverseerSignal::StopWork(parent_hash_1))).await.unwrap(); diff --git a/statement-table/src/generic.rs b/statement-table/src/generic.rs index 83cae9daf26b..0aa72b730726 100644 --- a/statement-table/src/generic.rs +++ b/statement-table/src/generic.rs @@ -28,6 +28,8 @@ use std::collections::hash_map::{HashMap, Entry}; use std::hash::Hash; use std::fmt::Debug; +use primitives::parachain::{ValidityAttestation as PrimitiveValidityAttestation, ValidatorSignature}; + use codec::{Encode, Decode}; /// Context for the statement table. @@ -180,6 +182,15 @@ pub enum ValidityAttestation { Explicit(S), } +impl Into for ValidityAttestation { + fn into(self) -> PrimitiveValidityAttestation { + match self { + Self::Implicit(s) => PrimitiveValidityAttestation::Implicit(s), + Self::Explicit(s) => PrimitiveValidityAttestation::Explicit(s), + } + } +} + /// An attested-to candidate. 
#[derive(Clone, PartialEq, Decode, Encode)] pub struct AttestedCandidate { From a7d5e867d29664e159a04b04edd54926b519ef1d Mon Sep 17 00:00:00 2001 From: Fedor Sakharov Date: Thu, 2 Jul 2020 12:54:52 +0300 Subject: [PATCH 05/18] use assert_matches --- Cargo.lock | 1 + node/core/backing/Cargo.toml | 1 + node/core/backing/src/lib.rs | 231 ++++++++++++++++++----------------- 3 files changed, 121 insertions(+), 112 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b1abb8c8ec59..a1530a74c661 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -294,6 +294,7 @@ checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" name = "backing" version = "0.1.0" dependencies = [ + "assert_matches", "bitvec", "derive_more 0.99.9", "futures 0.3.5", diff --git a/node/core/backing/Cargo.toml b/node/core/backing/Cargo.toml index 31c0265e7748..12eb1c7633a7 100644 --- a/node/core/backing/Cargo.toml +++ b/node/core/backing/Cargo.toml @@ -26,3 +26,4 @@ bitvec = { version = "0.17.4", default-features = false, features = ["alloc"] } sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } futures = { version = "0.3.5", features = ["thread-pool"] } subsystem-test = { package = "polkadot-subsystem-test-helpers", path = "../../test-helpers/subsystem" } +assert_matches = "1.3.0" diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index cddb9539a06d..a8d4269a482c 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -719,6 +719,7 @@ mod tests { use polkadot_primitives::parachain::{ AssignmentKind, CollatorId, CoreAssignment, BlockData, CoreIndex, GroupIndex, ValidityAttestation, }; + use assert_matches::assert_matches; fn validator_pubkeys(val_ids: &[Sr25519Keyring]) -> Vec { val_ids.iter().map(|v| v.public().into()).collect() @@ -862,171 +863,177 @@ mod tests { } // Check that subsystem job issues a request for the validator groups. 
- match virtual_overseer.recv().await { + assert_matches!( + virtual_overseer.recv().await, AllMessages::RuntimeApi( RuntimeApiMessage::Request(parent, RuntimeApiRequest::ValidatorGroups(tx)) ) if parent == test_state.relay_parent => { tx.send(test_state.roster.clone()).unwrap(); } - msg => panic!("unexpected message {:?}", msg), - } + ); // Check that subsystem job issues a request for the validation code. - match virtual_overseer.recv().await { + assert_matches!( + virtual_overseer.recv().await, AllMessages::RuntimeApi( RuntimeApiMessage::Request(parent, RuntimeApiRequest::ValidationCode(id, _, _, tx)) ) if parent == test_state.relay_parent => { tx.send(test_state.validation_code.get(&id).unwrap().clone()).unwrap(); } - msg => panic!("unexpected message {:?}", msg), - } + ); // Check that subsystem job issues a request for the head data. - match virtual_overseer.recv().await { + assert_matches!( + virtual_overseer.recv().await, AllMessages::RuntimeApi( RuntimeApiMessage::Request(parent, RuntimeApiRequest::HeadData(id, tx)) ) if parent == test_state.relay_parent => { tx.send(test_state.head_data.get(&id).unwrap().clone()).unwrap(); } - msg => panic!("unexpected message {:?}", msg), - } + ); // Check that subsystem job issues a request for the signing context. - match virtual_overseer.recv().await { + assert_matches!( + virtual_overseer.recv().await, AllMessages::RuntimeApi( RuntimeApiMessage::Request(parent, RuntimeApiRequest::SigningContext(tx)) ) if parent == test_state.relay_parent => { tx.send(test_state.signing_context.clone()).unwrap(); } - msg => panic!("unexpected message {:?}", msg), - } + ); } #[test] - fn backing_works() { + fn backing_second_works() { let test_state = TestState::default(); test_harness(test_state.keystore.clone(), |test_harness| async move { - let TestHarness { mut virtual_overseer } = test_harness; test_startup(&mut virtual_overseer, &test_state).await; - // Check that `Second` message issues all necessary requests. 
- { - let pov_block = PoVBlock { - block_data: BlockData(vec![42, 43, 44]), - }; - - let pov_block_hash = pov_block.hash(); - let candidate = AbridgedCandidateReceipt { - parachain_index: test_state.chain_ids[0], - relay_parent: test_state.relay_parent, - pov_block_hash, - ..Default::default() - }; - - let second = CandidateBackingMessage::Second(test_state.relay_parent, candidate.clone()); - - virtual_overseer.send(FromOverseer::Communication{ msg: second }).await; - - match virtual_overseer.recv().await { - AllMessages::AvailabilityStore( - AvailabilityStoreMessage::QueryPoV( - pov_hash, - tx, - ) - ) if pov_hash == pov_block_hash => { - tx.send(Some(pov_block.clone())).unwrap(); - } - msg => panic!("unexpected msg {:?}", msg), + let pov_block = PoVBlock { + block_data: BlockData(vec![42, 43, 44]), + }; + + let pov_block_hash = pov_block.hash(); + let candidate = AbridgedCandidateReceipt { + parachain_index: test_state.chain_ids[0], + relay_parent: test_state.relay_parent, + pov_block_hash, + ..Default::default() + }; + + let second = CandidateBackingMessage::Second(test_state.relay_parent, candidate.clone()); + + virtual_overseer.send(FromOverseer::Communication{ msg: second }).await; + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::AvailabilityStore( + AvailabilityStoreMessage::QueryPoV( + pov_hash, + tx, + ) + ) if pov_hash == pov_block_hash => { + tx.send(Some(pov_block.clone())).unwrap(); } + ); - match virtual_overseer.recv().await { - AllMessages::CandidateValidation( - CandidateValidationMessage::Validate( - parent_hash, - c, - pov, - tx, - ) - ) if parent_hash == test_state.relay_parent && pov == pov_block && c == candidate => { - tx.send(Ok(())).unwrap(); - } - msg => panic!("unexpected msg {:?}", msg), + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CandidateValidation( + CandidateValidationMessage::Validate( + parent_hash, + c, + pov, + tx, + ) + ) if parent_hash == test_state.relay_parent && pov == pov_block && 
c == candidate => { + tx.send(Ok(())).unwrap(); } + ); - match virtual_overseer.recv().await { - AllMessages::StatementDistribution( - StatementDistributionMessage::Share( - parent_hash, - signed_statement, - ) - ) if parent_hash == test_state.relay_parent => { - signed_statement.check_signature( - &test_state.signing_context, - &test_state.validator_public[0], - ).unwrap(); - } - msg => panic!("unexpected message {:?}", msg), + assert_matches!( + virtual_overseer.recv().await, + AllMessages::StatementDistribution( + StatementDistributionMessage::Share( + parent_hash, + signed_statement, + ) + ) if parent_hash == test_state.relay_parent => { + signed_statement.check_signature( + &test_state.signing_context, + &test_state.validator_public[0], + ).unwrap(); } - } + ); + + virtual_overseer.send(FromOverseer::Signal(OverseerSignal::StopWork(test_state.relay_parent))).await; + }); + } + + #[test] + fn backing_works() { + let test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |test_harness| async move { + let TestHarness { mut virtual_overseer } = test_harness; + + test_startup(&mut virtual_overseer, &test_state).await; // Check that reaching a required quorum on a candidate issues a correct message // to the `BackingWatcher`. 
- { - let (backed_tx, mut backed_rx) = mpsc::channel(64); + let (backed_tx, mut backed_rx) = mpsc::channel(64); - let register_watcher = CandidateBackingMessage::RegisterBackingWatcher( - test_state.relay_parent, - backed_tx, - ); + let register_watcher = CandidateBackingMessage::RegisterBackingWatcher( + test_state.relay_parent, + backed_tx, + ); - virtual_overseer.send(FromOverseer::Communication{ msg: register_watcher}).await; - - let candidate_a = AbridgedCandidateReceipt { - parachain_index: test_state.chain_ids[0], - relay_parent: test_state.relay_parent, - pov_block_hash: Hash::from([5; 32]), - ..Default::default() - }; - let candidate_a_hash = candidate_a.hash(); - - let signed_a = SignedFullStatement::sign( - Statement::Seconded(candidate_a.clone()), - &test_state.signing_context, - 2, - &test_state.validators[2].pair().into(), - ); + virtual_overseer.send(FromOverseer::Communication{ msg: register_watcher}).await; - let signed_b = SignedFullStatement::sign( - Statement::Valid(candidate_a_hash), - &test_state.signing_context, - 0, - &test_state.validators[0].pair().into(), - ); + let candidate_a = AbridgedCandidateReceipt { + parachain_index: test_state.chain_ids[0], + relay_parent: test_state.relay_parent, + pov_block_hash: Hash::from([5; 32]), + ..Default::default() + }; + let candidate_a_hash = candidate_a.hash(); + + let signed_a = SignedFullStatement::sign( + Statement::Seconded(candidate_a.clone()), + &test_state.signing_context, + 2, + &test_state.validators[2].pair().into(), + ); - let statement = CandidateBackingMessage::Statement(test_state.relay_parent, signed_a.clone()); + let signed_b = SignedFullStatement::sign( + Statement::Valid(candidate_a_hash), + &test_state.signing_context, + 0, + &test_state.validators[0].pair().into(), + ); - virtual_overseer.send(FromOverseer::Communication{ msg: statement }).await; + let statement = CandidateBackingMessage::Statement(test_state.relay_parent, signed_a.clone()); - let statement = 
CandidateBackingMessage::Statement(test_state.relay_parent, signed_b.clone()); + virtual_overseer.send(FromOverseer::Communication{ msg: statement }).await; - virtual_overseer.send(FromOverseer::Communication{ msg: statement }).await; + let statement = CandidateBackingMessage::Statement(test_state.relay_parent, signed_b.clone()); - let backed = backed_rx.next().await.unwrap(); + virtual_overseer.send(FromOverseer::Communication{ msg: statement }).await; - // `validity_votes` may be in any order so we can't do this in a single assert. - assert_eq!(backed.0.candidate, candidate_a); - assert_eq!(backed.0.validity_votes.len(), 2); - assert!(backed.0.validity_votes.contains( - &ValidityAttestation::Explicit(signed_b.signature().clone()) - )); - assert!(backed.0.validity_votes.contains( - &ValidityAttestation::Implicit(signed_a.signature().clone()) - )); + let backed = backed_rx.next().await.unwrap(); - assert_eq!(backed.0.validator_indices, bitvec::bitvec![Lsb0, u8; 1, 0, 1, 0, 0]); - } + // `validity_votes` may be in any order so we can't do this in a single assert. 
+ assert_eq!(backed.0.candidate, candidate_a); + assert_eq!(backed.0.validity_votes.len(), 2); + assert!(backed.0.validity_votes.contains( + &ValidityAttestation::Explicit(signed_b.signature().clone()) + )); + assert!(backed.0.validity_votes.contains( + &ValidityAttestation::Implicit(signed_a.signature().clone()) + )); + + assert_eq!(backed.0.validator_indices, bitvec::bitvec![Lsb0, u8; 1, 0, 1, 0, 0]); virtual_overseer.send(FromOverseer::Signal(OverseerSignal::StopWork(test_state.relay_parent))).await; }); From 697accf4e39d56a7eb53568a74ed93baab919bea Mon Sep 17 00:00:00 2001 From: Fedor Sakharov Date: Thu, 2 Jul 2020 17:04:01 +0300 Subject: [PATCH 06/18] Changes to report misbehaviors --- node/core/backing/src/lib.rs | 116 +++++++++++++++++++++++++++++++-- node/primitives/src/lib.rs | 53 +++++++++++++-- primitives/src/parachain.rs | 15 +++++ statement-table/src/generic.rs | 6 +- 4 files changed, 178 insertions(+), 12 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index a8d4269a482c..e261e19eab83 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -22,6 +22,7 @@ use std::collections::{HashMap, HashSet}; use std::pin::Pin; use std::sync::Arc; use std::time::Duration; +use std::convert::TryFrom; use bitvec::vec::BitVec; use log; @@ -43,19 +44,19 @@ use polkadot_primitives::{ ValidatorIndex, HeadData, ValidationCode, SigningContext, PoVBlock, }, }; -use polkadot_node_primitives::{Statement, SignedFullStatement}; +use polkadot_node_primitives::{Statement, SignedFullStatement, MisbehaviorReport}; use polkadot_subsystem::{ FromOverseer, OverseerSignal, Subsystem, SubsystemContext, SpawnedSubsystem, }; use polkadot_subsystem::messages::{ AllMessages, AvailabilityStoreMessage, CandidateBackingMessage, SchedulerRoster, RuntimeApiMessage, RuntimeApiRequest, CandidateValidationMessage, ValidationFailed, - StatementDistributionMessage, NewBackedCandidate, + StatementDistributionMessage, NewBackedCandidate, 
ProvisionerMessage, ProvisionableData, }; use statement_table::{ generic::AttestedCandidate as TableAttestedCandidate, - self, Table, Context as TableContextTrait, - Statement as TableStatement, SignedStatement as TableSignedStatement, + Table, Context as TableContextTrait, Statement as TableStatement, + SignedStatement as TableSignedStatement, }; #[derive(Debug, derive_more::From)] @@ -95,6 +96,8 @@ struct CandidateBackingJob { seconded: Option, /// Registered backing watchers. backing_watchers: Vec>, + /// We have already reported misbehaviors for these validators. + reported_misbehavior_for: HashSet, table: Table, table_context: TableContext, @@ -161,6 +164,7 @@ enum FromJob { AvailabilityStoreMessage(AvailabilityStoreMessage), CandidateValidation(CandidateValidationMessage), StatementDistribution(StatementDistributionMessage), + Provisioner(ProvisionerMessage), } impl From for AllMessages { @@ -170,6 +174,7 @@ impl From for AllMessages { FromJob::AvailabilityStoreMessage(msg) => AllMessages::AvailabilityStore(msg), FromJob::CandidateValidation(msg) => AllMessages::CandidateValidation(msg), FromJob::StatementDistribution(msg) => AllMessages::StatementDistribution(msg), + FromJob::Provisioner(msg) => AllMessages::Provisioner(msg), } } } @@ -224,6 +229,7 @@ impl CandidateBackingJob { backed_candidates: HashSet::default(), seconded: None, backing_watchers: Vec::new(), + reported_misbehavior_for: HashSet::default(), table: Table::default(), table_context: TableContext::default(), @@ -435,6 +441,26 @@ impl CandidateBackingJob { } } } + + let mut reports = Vec::new(); + + for (k, v) in self.table.get_misbehavior().iter() { + if !self.reported_misbehavior_for.contains(k) { + self.reported_misbehavior_for.insert(*k); + + if let Ok(report) = MisbehaviorReport::try_from((*k, v.clone())) { + let message = ProvisionerMessage::ProvisionableData( + ProvisionableData::MisbehaviorReport(self.parent, report) + ); + + reports.push(message); + } + } + } + + for report in 
reports.drain(..) { + self.send_to_provisioner(report).await? + } } Err(()) => { return Err(Error::ValidationFailed(ValidationFailed)); @@ -478,6 +504,12 @@ impl CandidateBackingJob { Ok(()) } + async fn send_to_provisioner(&mut self, msg: ProvisionerMessage) -> Result<(), Error> { + self.tx_from.send(FromJob::Provisioner(msg)).await?; + + Ok(()) + } + async fn request_candidate_validation( &mut self, candidate: AbridgedCandidateReceipt, @@ -997,6 +1029,7 @@ mod tests { pov_block_hash: Hash::from([5; 32]), ..Default::default() }; + let candidate_a_hash = candidate_a.hash(); let signed_a = SignedFullStatement::sign( @@ -1038,4 +1071,79 @@ mod tests { virtual_overseer.send(FromOverseer::Signal(OverseerSignal::StopWork(test_state.relay_parent))).await; }); } + + #[test] + fn backing_misbehavior_works() { + let test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |test_harness| async move { + let TestHarness { mut virtual_overseer } = test_harness; + + test_startup(&mut virtual_overseer, &test_state).await; + + let candidate_a = AbridgedCandidateReceipt { + parachain_index: test_state.chain_ids[0], + relay_parent: test_state.relay_parent, + pov_block_hash: Hash::from([5; 32]), + ..Default::default() + }; + + let candidate_a_hash = candidate_a.hash(); + + let signed_a = SignedFullStatement::sign( + Statement::Seconded(candidate_a.clone()), + &test_state.signing_context, + 2, + &test_state.validators[2].pair().into(), + ); + + let signed_b = SignedFullStatement::sign( + Statement::Valid(candidate_a_hash), + &test_state.signing_context, + 0, + &test_state.validators[0].pair().into(), + ); + + let signed_c = SignedFullStatement::sign( + Statement::Invalid(candidate_a_hash), + &test_state.signing_context, + 0, + &test_state.validators[0].pair().into(), + ); + + let statement = CandidateBackingMessage::Statement(test_state.relay_parent, signed_a.clone()); + + virtual_overseer.send(FromOverseer::Communication{ msg: statement }).await; + + let 
statement = CandidateBackingMessage::Statement(test_state.relay_parent, signed_b.clone()); + + virtual_overseer.send(FromOverseer::Communication{ msg: statement }).await; + + let statement = CandidateBackingMessage::Statement(test_state.relay_parent, signed_c.clone()); + + virtual_overseer.send(FromOverseer::Communication{ msg: statement }).await; + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::Provisioner( + ProvisionerMessage::ProvisionableData( + ProvisionableData::MisbehaviorReport( + relay_parent, + MisbehaviorReport::SelfContradiction(_, s1, s2), + ) + ) + ) if relay_parent == test_state.relay_parent => { + s1.check_signature( + &test_state.signing_context, + &test_state.validator_public[s1.validator_index() as usize], + ).unwrap(); + + s2.check_signature( + &test_state.signing_context, + &test_state.validator_public[s2.validator_index() as usize], + ).unwrap(); + } + ); + + }); + } } diff --git a/node/primitives/src/lib.rs b/node/primitives/src/lib.rs index 527e6aaea274..873754422494 100644 --- a/node/primitives/src/lib.rs +++ b/node/primitives/src/lib.rs @@ -23,10 +23,14 @@ use parity_scale_codec::{Decode, Encode}; use polkadot_primitives::{Hash, parachain::{ - AbridgedCandidateReceipt, CandidateReceipt, CompactStatement, - EncodeAs, Signed, + AbridgedCandidateReceipt, CompactStatement, + EncodeAs, Signed, ValidatorIndex, } }; +use polkadot_statement_table::{ + self as table, + Misbehavior as TableMisbehavior, +}; /// A statement, where the candidate receipt is included in the `Seconded` variant. #[derive(Debug, Clone, PartialEq, Encode, Decode)] @@ -74,11 +78,50 @@ pub enum MisbehaviorReport { /// this message should be dispatched with all of them, in arbitrary order. /// /// This variant is also used when our own validity checks disagree with others'. 
- CandidateValidityDisagreement(CandidateReceipt, Vec), + CandidateValidityDisagreement(AbridgedCandidateReceipt, Vec), /// I've noticed a peer contradicting itself about a particular candidate - SelfContradiction(CandidateReceipt, SignedFullStatement, SignedFullStatement), + SelfContradiction(AbridgedCandidateReceipt, SignedFullStatement, SignedFullStatement), /// This peer has seconded more than one parachain candidate for this relay parent head - DoubleVote(CandidateReceipt, SignedFullStatement, SignedFullStatement), + DoubleVote(AbridgedCandidateReceipt, SignedFullStatement, SignedFullStatement), +} + +impl std::convert::TryFrom<(ValidatorIndex, TableMisbehavior)> for MisbehaviorReport { + type Error = (); + + fn try_from(a: (ValidatorIndex, TableMisbehavior)) -> Result { + let (id, report) = a; + + match report { + TableMisbehavior::ValidityDoubleVote(dv) => { + match dv { + table::generic::ValidityDoubleVote::IssuedAndValidity((c, s1), (d, s2)) => { + let receipt = c.clone(); + let signed_1 = SignedFullStatement::new(Statement::Seconded(c), id, s1); + let signed_2 = SignedFullStatement::new(Statement::Valid(d), id, s2); + + Ok(MisbehaviorReport::DoubleVote(receipt, signed_1, signed_2)) + } + table::generic::ValidityDoubleVote::IssuedAndInvalidity((c, s1), (d, s2)) => { + let receipt = c.clone(); + let signed_1 = SignedFullStatement::new(Statement::Seconded(c), id, s1); + let signed_2 = SignedFullStatement::new(Statement::Invalid(d), id, s2); + + Ok(MisbehaviorReport::DoubleVote(receipt, signed_1, signed_2)) + } + table::generic::ValidityDoubleVote::ValidityAndInvalidity(c, s1, s2) => { + let signed_1 = SignedFullStatement::new(Statement::Valid(c.hash()), id, s1); + let signed_2 = SignedFullStatement::new(Statement::Invalid(c.hash()), id, s2); + + Ok(MisbehaviorReport::SelfContradiction(c, signed_1, signed_2)) + } + } + } + _ => { + // TODO: match other cases + Err(()) + } + } + } } /// A unique identifier for a network protocol. 
diff --git a/primitives/src/parachain.rs b/primitives/src/parachain.rs index 7939c227b55b..1247f47dcd06 100644 --- a/primitives/src/parachain.rs +++ b/primitives/src/parachain.rs @@ -981,6 +981,21 @@ impl, RealPayload: Encode> Signed Self { + Self { + payload, + validator_index, + signature, + real_payload: std::marker::PhantomData, + } + } + /// Sign this payload with the given context and key, storing the validator index. #[cfg(feature = "std")] pub fn sign( diff --git a/statement-table/src/generic.rs b/statement-table/src/generic.rs index 0aa72b730726..11c6a3c02478 100644 --- a/statement-table/src/generic.rs +++ b/statement-table/src/generic.rs @@ -100,7 +100,7 @@ pub enum ValidityDoubleVote { /// Implicit vote by issuing and explicitly voting invalidity IssuedAndInvalidity((C, S), (D, S)), /// Direct votes for validity and invalidity - ValidityAndInvalidity(D, S, S), + ValidityAndInvalidity(C, S, S), } /// Misbehavior: multiple signatures on same statement. @@ -561,7 +561,7 @@ impl Table { // valid vote conflicting with invalid vote (ValidityVote::Valid(good), ValidityVote::Invalid(bad)) | (ValidityVote::Invalid(bad), ValidityVote::Valid(good)) => - make_vdv(ValidityDoubleVote::ValidityAndInvalidity(digest, good, bad)), + make_vdv(ValidityDoubleVote::ValidityAndInvalidity(votes.candidate.clone(), good, bad)), // two signatures on same candidate (ValidityVote::Issued(a), ValidityVote::Issued(b)) => @@ -828,7 +828,7 @@ mod tests { assert_eq!( table.detected_misbehavior.get(&AuthorityId(2)).unwrap(), &Misbehavior::ValidityDoubleVote(ValidityDoubleVote::ValidityAndInvalidity( - candidate_digest, + Candidate(2, 100), Signature(2), Signature(2), )) From 2ce747eeff078b2a0ab67a8ff59a509097e8a51d Mon Sep 17 00:00:00 2001 From: Fedor Sakharov Date: Fri, 3 Jul 2020 19:24:16 +0300 Subject: [PATCH 07/18] Some fixes after a review --- Cargo.lock | 48 +-- node/core/backing/Cargo.toml | 2 +- node/core/backing/src/lib.rs | 676 ++++++++++++++++++++++++--------- 
node/primitives/src/lib.rs | 106 ++++-- node/subsystem/src/messages.rs | 26 +- primitives/src/parachain.rs | 69 +++- 6 files changed, 675 insertions(+), 252 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a1530a74c661..ac5ed0590706 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -290,30 +290,6 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" -[[package]] -name = "backing" -version = "0.1.0" -dependencies = [ - "assert_matches", - "bitvec", - "derive_more 0.99.9", - "futures 0.3.5", - "futures-timer 3.0.2", - "log 0.4.8", - "polkadot-node-primitives", - "polkadot-node-subsystem", - "polkadot-primitives", - "polkadot-statement-table", - "polkadot-subsystem-test-helpers", - "sc-client-api", - "sc-keystore", - "sp-api", - "sp-blockchain", - "sp-core", - "sp-keyring", - "streamunordered", -] - [[package]] name = "backtrace" version = "0.3.49" @@ -4433,6 +4409,30 @@ dependencies = [ "sp-runtime", ] +[[package]] +name = "polkadot-node-core-backing" +version = "0.1.0" +dependencies = [ + "assert_matches", + "bitvec", + "derive_more 0.99.9", + "futures 0.3.5", + "futures-timer 3.0.2", + "log 0.4.8", + "polkadot-node-primitives", + "polkadot-node-subsystem", + "polkadot-primitives", + "polkadot-statement-table", + "polkadot-subsystem-test-helpers", + "sc-client-api", + "sc-keystore", + "sp-api", + "sp-blockchain", + "sp-core", + "sp-keyring", + "streamunordered", +] + [[package]] name = "polkadot-node-primitives" version = "0.1.0" diff --git a/node/core/backing/Cargo.toml b/node/core/backing/Cargo.toml index 12eb1c7633a7..bf859e327427 100644 --- a/node/core/backing/Cargo.toml +++ b/node/core/backing/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "backing" +name = "polkadot-node-core-backing" version = "0.1.0" authors = ["Parity Technologies "] edition = "2018" diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 
e261e19eab83..f1da80412dec 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -19,10 +19,10 @@ #![recursion_limit="256"] use std::collections::{HashMap, HashSet}; +use std::convert::TryFrom; +use std::iter::FromIterator; use std::pin::Pin; -use std::sync::Arc; use std::time::Duration; -use std::convert::TryFrom; use bitvec::vec::BitVec; use log; @@ -30,7 +30,7 @@ use futures::{ select, FutureExt, SinkExt, StreamExt, channel::{oneshot, mpsc}, future::{self, Either}, - task::{Spawn, SpawnExt}, + task::{Spawn, SpawnError, SpawnExt}, }; use futures_timer::Delay; use streamunordered::{StreamUnordered, StreamYield}; @@ -41,17 +41,21 @@ use polkadot_primitives::{ Hash, BlockNumber, parachain::{ AbridgedCandidateReceipt, BackedCandidate, Id as ParaId, ValidatorPair, ValidatorId, - ValidatorIndex, HeadData, ValidationCode, SigningContext, PoVBlock, + ValidatorIndex, HeadData, ValidationCode, SigningContext, PoVBlock, ValidationResult, + CandidateDescriptor, }, }; -use polkadot_node_primitives::{Statement, SignedFullStatement, MisbehaviorReport}; +use polkadot_node_primitives::{ + FromTableMisbehavior, Statement, SignedFullStatement, MisbehaviorReport, +}; use polkadot_subsystem::{ FromOverseer, OverseerSignal, Subsystem, SubsystemContext, SpawnedSubsystem, }; use polkadot_subsystem::messages::{ - AllMessages, AvailabilityStoreMessage, CandidateBackingMessage, SchedulerRoster, + AllMessages, CandidateBackingMessage, CandidateSelectionMessage, SchedulerRoster, RuntimeApiMessage, RuntimeApiRequest, CandidateValidationMessage, ValidationFailed, StatementDistributionMessage, NewBackedCandidate, ProvisionerMessage, ProvisionableData, + PoVDistributionMessage, }; use statement_table::{ generic::AttestedCandidate as TableAttestedCandidate, @@ -61,12 +65,15 @@ use statement_table::{ #[derive(Debug, derive_more::From)] enum Error { + InvalidSignature, #[from] ValidationFailed(ValidationFailed), #[from] Oneshot(oneshot::Canceled), #[from] 
Mpsc(mpsc::SendError), + #[from] + Spawn(SpawnError), } /// Holds all data needed for candidate backing job operation. @@ -90,12 +97,8 @@ struct CandidateBackingJob { assignment: ParaId, /// We issued `Valid` statements on about these candidates. issued_validity: HashSet, - /// We sent `NewBackedCandidate` on these candidates. - backed_candidates: HashSet, /// `Some(h)` if this job has already issues `Seconded` statemt for some candidate with `h` hash. seconded: Option, - /// Registered backing watchers. - backing_watchers: Vec>, /// We have already reported misbehaviors for these validators. reported_misbehavior_for: HashSet, @@ -103,32 +106,27 @@ struct CandidateBackingJob { table_context: TableContext, } -const fn needed_votes(n_validators: usize) -> usize { - let mut threshold = (n_validators * 2) / 3; - threshold += (n_validators * 2) % 3; +const fn group_quorum(n_validators: usize) -> usize { + let mut threshold = (n_validators) / 2; + threshold += 1; threshold } #[derive(Default)] struct TableContext { signing_context: SigningContext, - key: Option>, - groups: HashMap>, + key: Option, + groups: HashMap>, validators: Vec, } impl TableContextTrait for TableContext { fn is_member_of(&self, authority: ValidatorIndex, group: &ParaId) -> bool { - let key = match self.validators.get(authority as usize) { - Some(val) => val, - None => return false, - }; - - self.groups.get(group).map_or(false, |g| g.get(&key).is_some()) + self.groups.get(group).map_or(false, |g| g.get(&authority).is_some()) } fn requisite_votes(&self, group: &ParaId) -> usize { - self.groups.get(group).map_or(usize::max_value(), |g| needed_votes(g.len())) + self.groups.get(group).map_or(usize::max_value(), |g| group_quorum(g.len())) } } @@ -161,19 +159,21 @@ enum ToJob { /// A message type that is sent from `CandidateBackingJob` to `CandidateBackingSubsystem`. 
enum FromJob { RuntimeApiMessage(RuntimeApiMessage), - AvailabilityStoreMessage(AvailabilityStoreMessage), CandidateValidation(CandidateValidationMessage), - StatementDistribution(StatementDistributionMessage), + CandidateSelection(CandidateSelectionMessage), Provisioner(ProvisionerMessage), + PoVDistribution(PoVDistributionMessage), + StatementDistribution(StatementDistributionMessage), } impl From for AllMessages { fn from(f: FromJob) -> Self { match f { FromJob::RuntimeApiMessage(msg) => AllMessages::RuntimeApi(msg), - FromJob::AvailabilityStoreMessage(msg) => AllMessages::AvailabilityStore(msg), FromJob::CandidateValidation(msg) => AllMessages::CandidateValidation(msg), + FromJob::CandidateSelection(msg) => AllMessages::CandidateSelection(msg), FromJob::StatementDistribution(msg) => AllMessages::StatementDistribution(msg), + FromJob::PoVDistribution(msg) => AllMessages::PoVDistribution(msg), FromJob::Provisioner(msg) => AllMessages::Provisioner(msg), } } @@ -197,13 +197,13 @@ fn primitive_statement_to_table(s: &SignedFullStatement) -> TableSignedStatement // finds the first key we are capable of signing with out of the given set of validators, // if any. 
-fn signing_key(validators: &[ValidatorId], keystore: &KeyStorePtr) -> Option> { +fn signing_key(validators: &[ValidatorId], keystore: &KeyStorePtr) -> Option { let keystore = keystore.read(); validators.iter() .find_map(|v| { keystore.key_pair::(&v).ok() }) - .map(|pair| Arc::new(pair)) + .map(|pair| pair) } impl CandidateBackingJob { @@ -226,9 +226,7 @@ impl CandidateBackingJob { assignment: ParaId::default(), issued_validity: HashSet::default(), - backed_candidates: HashSet::default(), seconded: None, - backing_watchers: Vec::new(), reported_misbehavior_for: HashSet::default(), table: Table::default(), @@ -323,19 +321,22 @@ impl CandidateBackingJob { self.table_context.key = signing_key(&validators[..], &self.keystore); for assignment in roster.scheduled { - self.table_context.groups.insert( - assignment.para_id, - roster.validator_groups[assignment.group_idx.0 as usize] - .iter() - .map(|idx| validators[*idx as usize].clone()) - .collect() - ); + if let Some(groups) = roster.validator_groups.get(assignment.group_idx.0 as usize) { + self.table_context.groups.insert( + assignment.para_id, + HashSet::from_iter(groups.iter().copied()), + ); + } } if let Some(ref key) = self.table_context.key { - for (para_id, group) in self.table_context.groups.iter() { - if group.contains(&key.as_ref().public()) { - self.assignment = *para_id; + if let Some(idx) = validators.iter().position(|k| *k == key.public()) { + let idx = idx as u32; + for (para_id, group) in self.table_context.groups.iter() { + if group.contains(&idx) { + self.assignment = *para_id; + break; + } } } } @@ -364,103 +365,125 @@ impl CandidateBackingJob { Ok(()) } + async fn issue_candidate_invalid_message( + &mut self, + candidate: AbridgedCandidateReceipt, + ) -> Result<(), Error> { + self.tx_from.send(FromJob::CandidateSelection( + CandidateSelectionMessage::Invalid(self.parent, candidate) + )).await?; + + Ok(()) + } + /// Validate the candidate that is requested to be `Second`ed and distribute validation 
result. - async fn validate_and_second(&mut self, candidate: AbridgedCandidateReceipt) -> Result<(), Error> { - let statement = match self.spawn_validation_work(candidate.clone()).await { - Ok(()) => { + async fn validate_and_second( + &mut self, + candidate: AbridgedCandidateReceipt, + pov: PoVBlock, + ) -> Result { + let valid = self.request_candidate_validation(candidate.clone(), pov).await?; + let statement = match valid { + ValidationResult::Valid => { // make PoV available for later distribution. Send data to the availability // store to keep. Sign and dispatch `valid` statement to network if we // have not seconded the given candidate. Statement::Seconded(candidate) } - - // sign and dispatch `invalid` statement to network. - Err(_) => Statement::Invalid(candidate.hash()), + ValidationResult::Invalid => { + let candidate_hash = candidate.hash(); + self.issue_candidate_invalid_message(candidate).await?; + Statement::Invalid(candidate_hash) + } }; - if let Some(ref signing_key) = self.table_context.key { - if let Some(local_index) = self.table_context.local_index() { - let signed_statement = SignedFullStatement::sign( - statement, - &self.table_context.signing_context, - local_index, - signing_key, - ); - - self.distribute_signed_statement(signed_statement).await?; - } + if let Some(signed_statement) = self.sign_statement(statement) { + self.import_statement(signed_statement.clone()).await?; + self.distribute_signed_statement(signed_statement).await?; } - Ok(()) + Ok(valid) } - async fn import_statement(&mut self, statement: SignedFullStatement) -> Result<(), Error> { - let idx = statement.validator_index() as usize; + fn get_backed(&self) -> Vec { + let proposed = self.table.proposed_candidates(&self.table_context); + let mut res = Vec::with_capacity(proposed.len()); - if self.table_context.validators.len() > idx { - match statement.check_signature( - &self.table_context.signing_context, - &self.table_context.validators[idx], - ) { - Ok(()) => { - let stmt = 
primitive_statement_to_table(&statement); + for p in proposed.into_iter() { + let TableAttestedCandidate { candidate, validity_votes, .. } = p; - self.table.import_statement(&self.table_context, stmt); + let (ids, validity_votes): (Vec<_>, Vec<_>) = validity_votes + .into_iter() + .map(|(id, vote)| (id, vote.into())) + .unzip(); - let proposed = self.table.proposed_candidates(&self.table_context); + let mut validator_indices = BitVec::with_capacity( + self.table_context.validators.len(), + ); - for p in proposed.into_iter() { - if !self.backed_candidates.contains(&p.candidate.hash()) { - let TableAttestedCandidate { candidate, validity_votes, .. } = p; + validator_indices.resize(self.table_context.validators.len(), false); - let (ids, validity_votes): (Vec<_>, Vec<_>) = validity_votes - .into_iter() - .map(|(id, vote)| (id, vote.into())) - .unzip(); + for id in ids.iter() { + validator_indices.set(*id as usize, true); + } - self.backed_candidates.insert(candidate.hash()); + let backed = BackedCandidate { + candidate, + validity_votes, + validator_indices, + }; - let mut validator_indices = BitVec::with_capacity( - self.table_context.validators.len(), - ); + res.push(NewBackedCandidate(backed.clone())); + } - validator_indices.resize(self.table_context.validators.len(), false); + res + } - for id in ids.iter() { - validator_indices.set(*id as usize, true); - } + async fn issue_new_misbehaviors(&mut self) -> Result<(), Error> { + let mut reports = Vec::new(); - let backed = BackedCandidate { - candidate, - validity_votes, - validator_indices, - }; + for (k, v) in self.table.get_misbehavior().iter() { + if !self.reported_misbehavior_for.contains(k) { + self.reported_misbehavior_for.insert(*k); - for watcher in self.backing_watchers.iter_mut() { - let _ = watcher.send(NewBackedCandidate(backed.clone())).await; - } - } - } + let f = FromTableMisbehavior { + id: *k, + report: v.clone(), + signing_context: self.table_context.signing_context.clone(), + key: 
self.table_context.validators[*k as usize].clone(), + }; - let mut reports = Vec::new(); + if let Ok(report) = MisbehaviorReport::try_from(f) { + let message = ProvisionerMessage::ProvisionableData( + ProvisionableData::MisbehaviorReport(self.parent, report) + ); - for (k, v) in self.table.get_misbehavior().iter() { - if !self.reported_misbehavior_for.contains(k) { - self.reported_misbehavior_for.insert(*k); + reports.push(message); + } + } + } - if let Ok(report) = MisbehaviorReport::try_from((*k, v.clone())) { - let message = ProvisionerMessage::ProvisionableData( - ProvisionableData::MisbehaviorReport(self.parent, report) - ); + for report in reports.drain(..) { + self.send_to_provisioner(report).await? + } - reports.push(message); - } - } - } + Ok(()) + } - for report in reports.drain(..) { - self.send_to_provisioner(report).await? - } + async fn import_statement(&mut self, statement: SignedFullStatement) -> Result<(), Error> { + let idx = statement.validator_index() as usize; + + if self.table_context.validators.len() > idx { + match statement.check_signature( + &self.table_context.signing_context, + &self.table_context.validators[idx], + ) { + Ok(()) => { + let stmt = primitive_statement_to_table(&statement); + + self.table.import_statement(&self.table_context, stmt); + + self.issue_new_misbehaviors().await?; } Err(()) => { return Err(Error::ValidationFailed(ValidationFailed)); @@ -473,7 +496,7 @@ impl CandidateBackingJob { async fn process_msg(&mut self, msg: CandidateBackingMessage) -> Result<(), Error> { match msg { - CandidateBackingMessage::Second(_, candidate) => { + CandidateBackingMessage::Second(_, candidate, pov) => { // If the message is a `CandidateBackingMessage::Second`, sign and dispatch a // Seconded statement only if we have not seconded any other candidate and // have not signed a Valid statement for the requested candidate. 
@@ -483,38 +506,108 @@ impl CandidateBackingJob { let candidate_hash = candidate.hash(); if !self.issued_validity.contains(&candidate_hash) { - if let Ok(()) = self.validate_and_second(candidate).await { + if let Ok(ValidationResult::Valid) = self.validate_and_second( + candidate, + pov, + ).await { self.seconded = Some(candidate_hash); } } } // This job has already seconded a candidate. - Some(_) => { - }, + Some(_) => {} } } CandidateBackingMessage::Statement(_, statement) => { - let _ = self.import_statement(statement).await; + self.maybe_validate_and_import(statement).await?; } - CandidateBackingMessage::RegisterBackingWatcher(_, tx) => { - self.backing_watchers.push(tx); + CandidateBackingMessage::GetBackedCandidates(_, tx) => { + let backed = self.get_backed(); + + tx.send(backed).map_err(|_| oneshot::Canceled)?; } } Ok(()) } + async fn maybe_validate_and_import( + &mut self, + statement: SignedFullStatement, + ) -> Result<(), Error> { + let idx = statement.validator_index() as usize; + + if self.table_context.validators.len() > idx { + if let Err(()) = statement.check_signature( + &self.table_context.signing_context, + &self.table_context.validators[idx], + ) { + return Err(Error::InvalidSignature); + } + } else { + return Err(Error::InvalidSignature); + } + + if let Statement::Seconded(candidate) = statement.payload() { + if candidate.parachain_index == self.assignment { + let pov = self.request_pov_from_distribution(candidate.clone().into()).await?; + let v = self.request_candidate_validation(candidate.clone(), pov).await?; + + let statement = match v { + ValidationResult::Valid => Statement::Valid(candidate.hash()), + ValidationResult::Invalid => Statement::Invalid(candidate.hash()), + }; + + if let Some(signed_statement) = self.sign_statement(statement) { + self.distribute_signed_statement(signed_statement).await?; + } + } + } + + self.import_statement(statement).await?; + + Ok(()) + } + + fn sign_statement(&self, statement: Statement) -> Option { + let 
local_index = self.table_context.local_index()?; + + let signing_key = self.table_context.key.as_ref()?; + + let signed_statement = SignedFullStatement::sign( + statement, + &self.table_context.signing_context, + local_index, + signing_key, + ); + + Some(signed_statement) + } + async fn send_to_provisioner(&mut self, msg: ProvisionerMessage) -> Result<(), Error> { self.tx_from.send(FromJob::Provisioner(msg)).await?; Ok(()) } + async fn request_pov_from_distribution( + &mut self, + descriptor: CandidateDescriptor, + ) -> Result { + let (tx, rx) = oneshot::channel(); + + self.tx_from.send(FromJob::PoVDistribution( + PoVDistributionMessage::FetchPoV(self.parent, descriptor, tx) + )).await?; + + Ok(rx.await?) + } + async fn request_candidate_validation( &mut self, candidate: AbridgedCandidateReceipt, pov: PoVBlock, - ) -> Result<(), Error> { + ) -> Result { let (tx, rx) = oneshot::channel(); self.tx_from.send(FromJob::CandidateValidation( @@ -527,9 +620,7 @@ impl CandidateBackingJob { ) ).await?; - rx.await??; - - Ok(()) + Ok(rx.await??) 
}
 
 async fn distribute_signed_statement(&mut self, s: SignedFullStatement) -> Result<(), Error> {
@@ -539,25 +630,6 @@ impl CandidateBackingJob {
 Ok(())
 }
-
- async fn spawn_validation_work(
- &mut self,
- candidate: AbridgedCandidateReceipt,
- ) -> Result<(), Error> {
- let (tx, rx) = oneshot::channel();
-
- self.tx_from.send(FromJob::AvailabilityStoreMessage(
- AvailabilityStoreMessage::QueryPoV(
- candidate.pov_block_hash,
- tx,
- )
- )
- ).await?;
-
- let pov = rx.await?.unwrap();
-
- self.request_candidate_validation(candidate, pov).await
- }
 }
 
 struct JobHandle {
@@ -601,14 +673,14 @@ impl Jobs {
 }
 }
 
- fn spawn_job(&mut self, parent_hash: Hash, keystore: KeyStorePtr) -> Result<(), ()> {
+ fn spawn_job(&mut self, parent_hash: Hash, keystore: KeyStorePtr) -> Result<(), Error> {
 let (mut job, from_job) = CandidateBackingJob::new(parent_hash,keystore);
 
 let to_job = job.tx();
 
 let (future, abort_handle) = future::abortable(async move {
- if let Err(_) = job.run().await {
- log::error!("Job returned an error");
+ if let Err(e) = job.run().await {
+ log::error!("Job terminated with an error: {:?}", e);
 }
 });
 let (finished_tx, finished) = oneshot::channel();
@@ -617,7 +689,7 @@ impl Jobs {
 let _ = future.await;
 let _ = finished_tx.send(());
 };
- self.spawner.spawn(future).map_err(|_| ())?;
+ self.spawner.spawn(future)?;
 
 let su_handle = self.outgoing_msgs.push(from_job);
 
@@ -644,10 +716,11 @@ impl Jobs {
 }
 }
 
- async fn send_msg(&mut self, parent_hash: Hash, msg: ToJob) {
+ async fn send_msg(&mut self, parent_hash: Hash, msg: ToJob) -> Result<(), Error> {
 if let Some(job) = self.running.get_mut(&parent_hash) {
- let _ = job.send_msg(msg).await;
+ job.send_msg(msg).await?;
 }
+ Ok(())
 }
 
 async fn next(&mut self) -> Option {
@@ -690,17 +763,35 @@ impl CandidateBackingSubsystem
 match incoming {
 Ok(msg) => match msg {
 FromOverseer::Signal(OverseerSignal::StartWork(hash)) => {
- let _ = jobs.spawn_job(hash, keystore.clone());
+ if let Err(e) = jobs.spawn_job(hash, 
keystore.clone()) {
+ log::error!("Failed to spawn a job: {:?}", e);
+ break;
+ }
 }
 FromOverseer::Signal(OverseerSignal::StopWork(hash)) => {
- let _ = jobs.stop_job(hash);
+ if let Err(e) = jobs.stop_job(hash) {
+ log::error!("Failed to spawn a job: {:?}", e);
+ break;
+ }
 }
 FromOverseer::Communication { msg } => {
 match msg {
- CandidateBackingMessage::Second(hash, _) |
+ CandidateBackingMessage::Second(hash, _, _) |
 CandidateBackingMessage::Statement(hash, _) |
- CandidateBackingMessage::RegisterBackingWatcher(hash, _) => {
- let _ = jobs.send_msg(hash.clone(), ToJob::CandidateBacking(msg)).await;
+ CandidateBackingMessage::GetBackedCandidates(hash, _) => {
+ let res = jobs.send_msg(
+ hash.clone(),
+ ToJob::CandidateBacking(msg),
+ ).await;
+
+ if let Err(e) = res {
+ log::error!(
+ "Failed to send a message to a job: {:?}",
+ e,
+ );
+
+ break;
+ }
 }
 _ => (),
 }
@@ -742,10 +833,7 @@ impl Subsystem for CandidateBackingSubsystem
 
 #[cfg(test)]
 mod tests {
 use super::*;
- use futures::{
- channel::mpsc, Future,
- executor::{self, ThreadPool},
- };
+ use futures::{Future, executor::{self, ThreadPool}};
 use std::collections::HashMap;
 use sp_keyring::Sr25519Keyring;
 use polkadot_primitives::parachain::{
@@ -877,6 +965,7 @@ mod tests {
 executor::block_on(future::select(test_fut, subsystem));
 }
 
+ // Tests that the subsystem performs actions that are required on startup.
 async fn test_startup(
 virtual_overseer: &mut subsystem_test::TestSubsystemContextHandle,
 test_state: &TestState,
@@ -935,6 +1024,8 @@ mod tests {
 );
 }
 
+ // Test that a `CandidateBackingMessage::Second` issues validation work
+ // and, in case validation is successful, issues a `StatementDistributionMessage`.
#[test]
 fn backing_second_works() {
 let test_state = TestState::default();
@@ -955,22 +1046,14 @@ mod tests {
 ..Default::default()
 };
 
- let second = CandidateBackingMessage::Second(test_state.relay_parent, candidate.clone());
+ let second = CandidateBackingMessage::Second(
+ test_state.relay_parent,
+ candidate.clone(),
+ pov_block.clone(),
+ );
 
 virtual_overseer.send(FromOverseer::Communication{ msg: second }).await;
 
- assert_matches!(
- virtual_overseer.recv().await,
- AllMessages::AvailabilityStore(
- AvailabilityStoreMessage::QueryPoV(
- pov_hash,
- tx,
- )
- ) if pov_hash == pov_block_hash => {
- tx.send(Some(pov_block.clone())).unwrap();
- }
- );
-
 assert_matches!(
 virtual_overseer.recv().await,
 AllMessages::CandidateValidation(
@@ -980,8 +1063,9 @@ mod tests {
 pov,
 tx,
 )
- ) if parent_hash == test_state.relay_parent && pov == pov_block && c == candidate => {
- tx.send(Ok(())).unwrap();
+ ) if parent_hash == test_state.relay_parent &&
+ pov == pov_block && c == candidate => {
+ tx.send(Ok(ValidationResult::Valid)).unwrap();
 }
 );
 
@@ -1000,10 +1084,13 @@ mod tests {
 }
 );
 
- virtual_overseer.send(FromOverseer::Signal(OverseerSignal::StopWork(test_state.relay_parent))).await;
+ virtual_overseer.send(FromOverseer::Signal(
+ OverseerSignal::StopWork(test_state.relay_parent))
+ ).await;
 });
 }
 
+ // Test that the candidate reaches quorum successfully.
 #[test]
 fn backing_works() {
 let test_state = TestState::default();
@@ -1012,21 +1099,16 @@ mod tests {
 
 test_startup(&mut virtual_overseer, &test_state).await;
 
- // Check that reaching a required quorum on a candidate issues a correct message
- // to the `BackingWatcher`.
- let (backed_tx, mut backed_rx) = mpsc::channel(64);
-
- let register_watcher = CandidateBackingMessage::RegisterBackingWatcher(
- test_state.relay_parent,
- backed_tx,
- );
+ let pov_block = PoVBlock {
+ block_data: BlockData(vec![1, 2, 3]),
+ };
 
- virtual_overseer.send(FromOverseer::Communication{ msg: register_watcher}).await;
+ let pov_block_hash = pov_block.hash();
 
 let candidate_a = AbridgedCandidateReceipt {
 parachain_index: test_state.chain_ids[0],
 relay_parent: test_state.relay_parent,
- pov_block_hash: Hash::from([5; 32]),
+ pov_block_hash,
 ..Default::default()
 };
 
@@ -1050,28 +1132,73 @@ mod tests {
 
 virtual_overseer.send(FromOverseer::Communication{ msg: statement }).await;
 
- let statement = CandidateBackingMessage::Statement(test_state.relay_parent, signed_b.clone());
+ // Sending a `Statement::Seconded` for our assignment will start
+ // the validation process. The first thing requested is PoVBlock from the
+ // `PoVDistribution`.
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::PoVDistribution(
+ PoVDistributionMessage::FetchPoV(relay_parent, _, tx)
+ ) if relay_parent == test_state.relay_parent => {
+ tx.send(pov_block.clone()).unwrap();
+ }
+ );
+
+ // The next step is the actual request to Validation subsystem
+ // to validate the `Seconded` candidate.
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::CandidateValidation(
+ CandidateValidationMessage::Validate(
+ relay_parent,
+ candidate,
+ pov,
+ tx,
+ )
+ ) if relay_parent == test_state.relay_parent && candidate == candidate_a => {
+ assert_eq!(pov, pov_block);
+ tx.send(Ok(ValidationResult::Valid)).unwrap();
+ }
+ );
+
+ let statement = CandidateBackingMessage::Statement(
+ test_state.relay_parent,
+ signed_b.clone(),
+ );
 
 virtual_overseer.send(FromOverseer::Communication{ msg: statement }).await;
 
- let backed = backed_rx.next().await.unwrap();
+ let (tx, rx) = oneshot::channel();
+
+ // The backed candidates set should not be empty at this point.
+ virtual_overseer.send(FromOverseer::Communication{ + msg: CandidateBackingMessage::GetBackedCandidates( + test_state.relay_parent, + tx, + ) + }).await; + + let backed = rx.await.unwrap(); // `validity_votes` may be in any order so we can't do this in a single assert. - assert_eq!(backed.0.candidate, candidate_a); - assert_eq!(backed.0.validity_votes.len(), 2); - assert!(backed.0.validity_votes.contains( + assert_eq!(backed[0].0.candidate, candidate_a); + assert_eq!(backed[0].0.validity_votes.len(), 2); + assert!(backed[0].0.validity_votes.contains( &ValidityAttestation::Explicit(signed_b.signature().clone()) )); - assert!(backed.0.validity_votes.contains( + assert!(backed[0].0.validity_votes.contains( &ValidityAttestation::Implicit(signed_a.signature().clone()) )); + assert_eq!(backed[0].0.validator_indices, bitvec::bitvec![Lsb0, u8; 1, 0, 1, 0, 0]); - assert_eq!(backed.0.validator_indices, bitvec::bitvec![Lsb0, u8; 1, 0, 1, 0, 0]); - - virtual_overseer.send(FromOverseer::Signal(OverseerSignal::StopWork(test_state.relay_parent))).await; + virtual_overseer.send(FromOverseer::Signal( + OverseerSignal::StopWork(test_state.relay_parent)) + ).await; }); } + // Issuing conflicting statements on the same candidate should + // be a misbehavior. 
#[test] fn backing_misbehavior_works() { let test_state = TestState::default(); @@ -1080,10 +1207,15 @@ mod tests { test_startup(&mut virtual_overseer, &test_state).await; + let pov_block = PoVBlock { + block_data: BlockData(vec![1, 2, 3]), + }; + + let pov_block_hash = pov_block.hash(); let candidate_a = AbridgedCandidateReceipt { parachain_index: test_state.chain_ids[0], relay_parent: test_state.relay_parent, - pov_block_hash: Hash::from([5; 32]), + pov_block_hash, ..Default::default() }; @@ -1114,6 +1246,47 @@ mod tests { virtual_overseer.send(FromOverseer::Communication{ msg: statement }).await; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::PoVDistribution( + PoVDistributionMessage::FetchPoV(relay_parent, _, tx) + ) if relay_parent == test_state.relay_parent => { + tx.send(pov_block.clone()).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CandidateValidation( + CandidateValidationMessage::Validate( + relay_parent, + candidate, + pov, + tx, + ) + ) if relay_parent == test_state.relay_parent && candidate == candidate_a => { + assert_eq!(pov, pov_block); + tx.send(Ok(ValidationResult::Valid)).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::StatementDistribution( + StatementDistributionMessage::Share( + relay_parent, + signed_statement, + ) + ) if relay_parent == test_state.relay_parent => { + signed_statement.check_signature( + &test_state.signing_context, + &test_state.validator_public[0], + ).unwrap(); + + assert_eq!(*signed_statement.payload(), Statement::Valid(candidate_a_hash)); + } + ); + let statement = CandidateBackingMessage::Statement(test_state.relay_parent, signed_b.clone()); virtual_overseer.send(FromOverseer::Communication{ msg: statement }).await; @@ -1146,4 +1319,129 @@ mod tests { }); } + + // Test that if we are asked to second an invalid candidate we + // can still second a valid one afterwards. 
+ #[test] + fn backing_dont_second_invalid() { + let test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |test_harness| async move { + let TestHarness { mut virtual_overseer } = test_harness; + + test_startup(&mut virtual_overseer, &test_state).await; + + let pov_block_a = PoVBlock { + block_data: BlockData(vec![42, 43, 44]), + }; + + let pov_block_b = PoVBlock { + block_data: BlockData(vec![45, 46, 47]), + }; + + let pov_block_hash_a = pov_block_a.hash(); + let pov_block_hash_b = pov_block_b.hash(); + + let candidate_a = AbridgedCandidateReceipt { + parachain_index: test_state.chain_ids[0], + relay_parent: test_state.relay_parent, + pov_block_hash: pov_block_hash_a, + ..Default::default() + }; + + let candidate_a_hash = candidate_a.hash(); + + let candidate_b = AbridgedCandidateReceipt { + parachain_index: test_state.chain_ids[0], + relay_parent: test_state.relay_parent, + pov_block_hash: pov_block_hash_b, + ..Default::default() + }; + + let second = CandidateBackingMessage::Second( + test_state.relay_parent, + candidate_a.clone(), + pov_block_a.clone(), + ); + + virtual_overseer.send(FromOverseer::Communication{ msg: second }).await; + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CandidateValidation( + CandidateValidationMessage::Validate( + parent_hash, + c, + pov, + tx, + ) + ) if parent_hash == test_state.relay_parent && + pov == pov_block_a && c == candidate_a => { + tx.send(Ok(ValidationResult::Invalid)).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CandidateSelection( + CandidateSelectionMessage::Invalid(parent_hash, candidate) + ) if parent_hash == test_state.relay_parent && candidate == candidate_a + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::StatementDistribution( + StatementDistributionMessage::Share( + relay_parent, + statement, + ) + ) if relay_parent == test_state.relay_parent => { + assert_eq!(*statement.payload(), 
Statement::Invalid(candidate_a_hash)); + } + ); + + let second = CandidateBackingMessage::Second( + test_state.relay_parent, + candidate_b.clone(), + pov_block_b.clone(), + ); + + virtual_overseer.send(FromOverseer::Communication{ msg: second }).await; + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CandidateValidation( + CandidateValidationMessage::Validate( + parent_hash, + c, + pov, + tx, + ) + ) if parent_hash == test_state.relay_parent && + pov == pov_block_b && c == candidate_b => { + tx.send(Ok(ValidationResult::Valid)).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::StatementDistribution( + StatementDistributionMessage::Share( + parent_hash, + signed_statement, + ) + ) if parent_hash == test_state.relay_parent => { + signed_statement.check_signature( + &test_state.signing_context, + &test_state.validator_public[0], + ).unwrap(); + + assert_eq!(*signed_statement.payload(), Statement::Seconded(candidate_b)); + } + ); + + virtual_overseer.send(FromOverseer::Signal( + OverseerSignal::StopWork(test_state.relay_parent)) + ).await; + }); + } } diff --git a/node/primitives/src/lib.rs b/node/primitives/src/lib.rs index 873754422494..cf67644ef30c 100644 --- a/node/primitives/src/lib.rs +++ b/node/primitives/src/lib.rs @@ -24,11 +24,11 @@ use parity_scale_codec::{Decode, Encode}; use polkadot_primitives::{Hash, parachain::{ AbridgedCandidateReceipt, CompactStatement, - EncodeAs, Signed, ValidatorIndex, + EncodeAs, Signed, SigningContext, ValidatorIndex, ValidatorId, } }; use polkadot_statement_table::{ - self as table, + generic::ValidityDoubleVote as TableValidityDoubleVote, Misbehavior as TableMisbehavior, }; @@ -85,36 +85,82 @@ pub enum MisbehaviorReport { DoubleVote(AbridgedCandidateReceipt, SignedFullStatement, SignedFullStatement), } -impl std::convert::TryFrom<(ValidatorIndex, TableMisbehavior)> for MisbehaviorReport { +pub struct FromTableMisbehavior { + pub id: ValidatorIndex, + + pub report: 
TableMisbehavior, + + pub signing_context: SigningContext, + + pub key: ValidatorId, +} + +impl std::convert::TryFrom for MisbehaviorReport { type Error = (); - fn try_from(a: (ValidatorIndex, TableMisbehavior)) -> Result { - let (id, report) = a; - - match report { - TableMisbehavior::ValidityDoubleVote(dv) => { - match dv { - table::generic::ValidityDoubleVote::IssuedAndValidity((c, s1), (d, s2)) => { - let receipt = c.clone(); - let signed_1 = SignedFullStatement::new(Statement::Seconded(c), id, s1); - let signed_2 = SignedFullStatement::new(Statement::Valid(d), id, s2); - - Ok(MisbehaviorReport::DoubleVote(receipt, signed_1, signed_2)) - } - table::generic::ValidityDoubleVote::IssuedAndInvalidity((c, s1), (d, s2)) => { - let receipt = c.clone(); - let signed_1 = SignedFullStatement::new(Statement::Seconded(c), id, s1); - let signed_2 = SignedFullStatement::new(Statement::Invalid(d), id, s2); - - Ok(MisbehaviorReport::DoubleVote(receipt, signed_1, signed_2)) - } - table::generic::ValidityDoubleVote::ValidityAndInvalidity(c, s1, s2) => { - let signed_1 = SignedFullStatement::new(Statement::Valid(c.hash()), id, s1); - let signed_2 = SignedFullStatement::new(Statement::Invalid(c.hash()), id, s2); - - Ok(MisbehaviorReport::SelfContradiction(c, signed_1, signed_2)) - } - } + fn try_from(f: FromTableMisbehavior) -> Result { + match f.report { + TableMisbehavior::ValidityDoubleVote( + TableValidityDoubleVote::IssuedAndValidity((c, s1), (d, s2)) + ) => { + let receipt = c.clone(); + let signed_1 = SignedFullStatement::new( + Statement::Seconded(c), + f.id, + s1, + &f.signing_context, + &f.key, + ).ok_or(())?; + let signed_2 = SignedFullStatement::new( + Statement::Valid(d), + f.id, + s2, + &f.signing_context, + &f.key, + ).ok_or(())?; + + Ok(MisbehaviorReport::DoubleVote(receipt, signed_1, signed_2)) + } + TableMisbehavior::ValidityDoubleVote( + TableValidityDoubleVote::IssuedAndInvalidity((c, s1), (d, s2)) + ) => { + let receipt = c.clone(); + let signed_1 = 
SignedFullStatement::new( + Statement::Seconded(c), + f.id, + s1, + &f.signing_context, + &f.key, + ).ok_or(())?; + let signed_2 = SignedFullStatement::new( + Statement::Invalid(d), + f.id, + s2, + &f.signing_context, + &f.key, + ).ok_or(())?; + + Ok(MisbehaviorReport::DoubleVote(receipt, signed_1, signed_2)) + } + TableMisbehavior::ValidityDoubleVote( + TableValidityDoubleVote::ValidityAndInvalidity(c, s1, s2) + ) => { + let signed_1 = SignedFullStatement::new( + Statement::Valid(c.hash()), + f.id, + s1, + &f.signing_context, + &f.key, + ).ok_or(())?; + let signed_2 = SignedFullStatement::new( + Statement::Invalid(c.hash()), + f.id, + s2, + &f.signing_context, + &f.key, + ).ok_or(())?; + + Ok(MisbehaviorReport::SelfContradiction(c, signed_1, signed_2)) } _ => { // TODO: match other cases diff --git a/node/subsystem/src/messages.rs b/node/subsystem/src/messages.rs index c9c26d980c9e..d957d3e93ede 100644 --- a/node/subsystem/src/messages.rs +++ b/node/subsystem/src/messages.rs @@ -29,7 +29,7 @@ use polkadot_primitives::{BlockNumber, Hash, Signature}; use polkadot_primitives::parachain::{ AbridgedCandidateReceipt, PoVBlock, ErasureChunk, BackedCandidate, Id as ParaId, SignedAvailabilityBitfield, SigningContext, ValidatorId, ValidationCode, ValidatorIndex, - CoreAssignment, CoreOccupied, HeadData, + CoreAssignment, CoreOccupied, HeadData, ValidationResult, CandidateDescriptor, }; use polkadot_node_primitives::{ MisbehaviorReport, SignedFullStatement, View, ProtocolId, @@ -52,10 +52,10 @@ pub enum CandidateSelectionMessage { pub enum CandidateBackingMessage { /// Registers a stream listener for updates to the set of backable candidates that could be backed /// in a child of the given relay-parent, referenced by its hash. - RegisterBackingWatcher(Hash, mpsc::Sender), + GetBackedCandidates(Hash, oneshot::Sender>), /// Note that the Candidate Backing subsystem should second the given candidate in the context of the /// given relay-parent (ref. by hash). 
This candidate must be validated. - Second(Hash, AbridgedCandidateReceipt), + Second(Hash, AbridgedCandidateReceipt, PoVBlock), /// Note a validator's statement about a particular candidate. Disagreements about validity must be escalated /// to a broader check by Misbehavior Arbitration. Agreements are simply tallied until a quorum is reached. Statement(Hash, SignedFullStatement), @@ -76,7 +76,7 @@ pub enum CandidateValidationMessage { Hash, AbridgedCandidateReceipt, PoVBlock, - oneshot::Sender>, + oneshot::Sender>, ), } @@ -220,6 +220,22 @@ pub enum ProvisionerMessage { ProvisionableData(ProvisionableData), } +/// Message to the PoVDistributionMessage. +#[derive(Debug)] +pub enum PoVDistributionMessage { + /// Note a statement by a validator on a relay-parent. `Seconded` statements must always + /// have been passed in before `Valid` or `Invalid` statements. + ValidatorStatement(Hash, SignedFullStatement), + /// Fetch a PoV from the network. + /// (relay_parent, PoV-hash, Response channel). + FetchPoV(Hash, CandidateDescriptor, oneshot::Sender), + /// Distribute a PoV for the given relay-parent and CandidateDescriptor. + /// The PoV should correctly hash to the PoV hash mentioned in the CandidateDescriptor + DistributePoV(Hash, CandidateDescriptor, PoVBlock), + /// An update from the network bridge. + NetworkBridgeUpdate(NetworkBridgeEvent), +} + /// A message type tying together all message types that are used across Subsystems. #[derive(Debug)] pub enum AllMessages { @@ -241,4 +257,6 @@ pub enum AllMessages { RuntimeApi(RuntimeApiMessage), /// Message for the availability store subsystem. AvailabilityStore(AvailabilityStoreMessage), + /// Message for the PoV distribution subsystem. 
+ PoVDistribution(PoVDistributionMessage), } diff --git a/primitives/src/parachain.rs b/primitives/src/parachain.rs index 1247f47dcd06..f7b4daa9d6b0 100644 --- a/primitives/src/parachain.rs +++ b/primitives/src/parachain.rs @@ -476,6 +476,52 @@ pub struct AbridgedCandidateReceipt { pub commitments: CandidateCommitments, } +/// A unique descriptor of the candidate receipt. +#[cfg_attr(feature = "std", derive(Debug, Default))] +pub struct CandidateDescriptor { + /// The ID of the parachain this is a candidate for. + pub parachain_index: Id, + /// The hash of the relay-chain block this should be executed in + /// the context of. + pub relay_parent: H, + /// The collator's relay-chain account ID + pub collator: CollatorId, + /// Signature on blake2-256 of the block data by collator. + pub signature: CollatorSignature, + /// Hash of the PoVBlock. + pub pov_block_hash: H, +} + +impl From> for CandidateDescriptor { + fn from(a: AbridgedCandidateReceipt) -> Self { + let AbridgedCandidateReceipt { + parachain_index, + relay_parent, + collator, + signature, + pov_block_hash, + .. + } = a; + + Self { + parachain_index, + relay_parent, + collator, + signature, + pov_block_hash, + } + } +} + +/// A candidate-receipt with commitments directly included. +pub struct CommitedCandidateReceipt { + /// The descriptor of the candidae. + pub descriptor: CandidateDescriptor, + + /// The commitments of the candidate receipt. + pub commitments: CandidateCommitments +} + impl + Encode> AbridgedCandidateReceipt { /// Check integrity vs. provided block data. pub fn check_signature(&self) -> Result<(), ()> { @@ -706,6 +752,15 @@ pub enum CompactStatement { /// A signed compact statement, suitable to be sent to the chain. pub type SignedStatement = Signed; +/// Result of the validation of the candidate. +#[derive(RuntimeDebug)] +pub enum ValidationResult { + /// Candidate is valid. + Valid, + /// Candidate is invalid. 
+ Invalid, +} + /// An either implicit or explicit attestation to the validity of a parachain /// candidate. #[derive(Clone, Eq, PartialEq, Decode, Encode, RuntimeDebug)] @@ -983,17 +1038,23 @@ impl, RealPayload: Encode> Signed( payload: Payload, validator_index: ValidatorIndex, signature: ValidatorSignature, - ) -> Self { - Self { + context: &SigningContext, + key: &ValidatorId, + ) -> Option { + let s = Self { payload, validator_index, signature, real_payload: std::marker::PhantomData, - } + }; + + s.check_signature(context, key).ok()?; + + Some(s) } /// Sign this payload with the given context and key, storing the validator index. From af9f34a26ddb6019130222f5e57ee694bc3284b9 Mon Sep 17 00:00:00 2001 From: Fedor Sakharov Date: Fri, 3 Jul 2020 19:33:58 +0300 Subject: [PATCH 08/18] Remove a blank line --- validation/src/shared_table/mod.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/validation/src/shared_table/mod.rs b/validation/src/shared_table/mod.rs index 4cde687c22f0..fd6702a7e640 100644 --- a/validation/src/shared_table/mod.rs +++ b/validation/src/shared_table/mod.rs @@ -706,7 +706,6 @@ mod tests { &validity_other_key.into(), &signing_context, ); - let signed_statement = ::table::generic::SignedStatement { statement: candidate_statement, signature: signature.into(), From 245cc96d43cab2b7def306dab86ed96b1d81adfd Mon Sep 17 00:00:00 2001 From: Fedor Sakharov Date: Sat, 4 Jul 2020 12:50:29 +0300 Subject: [PATCH 09/18] Update guide and some types --- node/core/backing/src/lib.rs | 11 ++-- node/primitives/src/lib.rs | 57 +++++++++++++++---- node/subsystem/src/messages.rs | 8 +-- primitives/src/parachain.rs | 9 --- .../src/node/backing/candidate-backing.md | 26 +++++++-- .../src/types/overseer-protocol.md | 6 +- 6 files changed, 81 insertions(+), 36 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index f1da80412dec..93c1f9edc9ad 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -41,12 +41,12 
@@ use polkadot_primitives::{ Hash, BlockNumber, parachain::{ AbridgedCandidateReceipt, BackedCandidate, Id as ParaId, ValidatorPair, ValidatorId, - ValidatorIndex, HeadData, ValidationCode, SigningContext, PoVBlock, ValidationResult, + ValidatorIndex, HeadData, ValidationCode, SigningContext, PoVBlock, CandidateDescriptor, }, }; use polkadot_node_primitives::{ - FromTableMisbehavior, Statement, SignedFullStatement, MisbehaviorReport, + FromTableMisbehavior, Statement, SignedFullStatement, MisbehaviorReport, ValidationResult, }; use polkadot_subsystem::{ FromOverseer, OverseerSignal, Subsystem, SubsystemContext, SpawnedSubsystem, @@ -65,6 +65,7 @@ use statement_table::{ #[derive(Debug, derive_more::From)] enum Error { + JobNotFound(Hash), InvalidSignature, #[from] ValidationFailed(ValidationFailed), @@ -705,14 +706,14 @@ impl Jobs { Ok(()) } - async fn stop_job(&mut self, parent_hash: Hash) -> Result<(), ()> { + async fn stop_job(&mut self, parent_hash: Hash) -> Result<(), Error> { match self.running.remove(&parent_hash) { Some(handle) => { Pin::new(&mut self.outgoing_msgs).remove(handle.su_handle); handle.stop().await; Ok(()) } - None => Err(()) + None => Err(Error::JobNotFound(parent_hash)) } } @@ -769,7 +770,7 @@ impl CandidateBackingSubsystem } } FromOverseer::Signal(OverseerSignal::StopWork(hash)) => { - if let Err(e) = jobs.stop_job(hash) { + if let Err(e) = jobs.stop_job(hash).await { log::error!("Failed to spawn a job: {:?}", e); break; } diff --git a/node/primitives/src/lib.rs b/node/primitives/src/lib.rs index cf67644ef30c..59043ffce677 100644 --- a/node/primitives/src/lib.rs +++ b/node/primitives/src/lib.rs @@ -28,7 +28,10 @@ use polkadot_primitives::{Hash, } }; use polkadot_statement_table::{ - generic::ValidityDoubleVote as TableValidityDoubleVote, + generic::{ + ValidityDoubleVote as TableValidityDoubleVote, + MultipleCandidates as TableMultipleCandidates, + }, Misbehavior as TableMisbehavior, }; @@ -73,7 +76,7 @@ pub enum MisbehaviorReport { /// 
These validator nodes disagree on this candidate's validity, please figure it out /// /// Most likely, the list of statments all agree except for the final one. That's not - /// guaranteed, though; if somehow we become aware of lots of + /// guarjnteed, though; if somehow we become aware of lots of /// statements disagreeing about the validity of a candidate before taking action, /// this message should be dispatched with all of them, in arbitrary order. /// @@ -82,19 +85,30 @@ pub enum MisbehaviorReport { /// I've noticed a peer contradicting itself about a particular candidate SelfContradiction(AbridgedCandidateReceipt, SignedFullStatement, SignedFullStatement), /// This peer has seconded more than one parachain candidate for this relay parent head - DoubleVote(AbridgedCandidateReceipt, SignedFullStatement, SignedFullStatement), + DoubleVote(SignedFullStatement, SignedFullStatement), } +/// A utility struct used to convert `TableMisbehavior` to `MisbehaviorReport`s. pub struct FromTableMisbehavior { + /// Index of the validator. pub id: ValidatorIndex, - + /// The misbehavior reported by the table. pub report: TableMisbehavior, - + /// Signing context. pub signing_context: SigningContext, - + /// Misbehaving validator's public key. pub key: ValidatorId, } +/// Result of the validation of the candidate. +#[derive(Debug)] +pub enum ValidationResult { + /// Candidate is valid. + Valid, + /// Candidate is invalid. 
+ Invalid, +} + impl std::convert::TryFrom for MisbehaviorReport { type Error = (); @@ -119,7 +133,7 @@ impl std::convert::TryFrom for MisbehaviorReport { &f.key, ).ok_or(())?; - Ok(MisbehaviorReport::DoubleVote(receipt, signed_1, signed_2)) + Ok(MisbehaviorReport::SelfContradiction(receipt, signed_1, signed_2)) } TableMisbehavior::ValidityDoubleVote( TableValidityDoubleVote::IssuedAndInvalidity((c, s1), (d, s2)) @@ -140,7 +154,7 @@ impl std::convert::TryFrom for MisbehaviorReport { &f.key, ).ok_or(())?; - Ok(MisbehaviorReport::DoubleVote(receipt, signed_1, signed_2)) + Ok(MisbehaviorReport::SelfContradiction(receipt, signed_1, signed_2)) } TableMisbehavior::ValidityDoubleVote( TableValidityDoubleVote::ValidityAndInvalidity(c, s1, s2) @@ -162,10 +176,31 @@ impl std::convert::TryFrom for MisbehaviorReport { Ok(MisbehaviorReport::SelfContradiction(c, signed_1, signed_2)) } - _ => { - // TODO: match other cases - Err(()) + TableMisbehavior::MultipleCandidates( + TableMultipleCandidates { + first, + second, + } + ) => { + let signed_1 = SignedFullStatement::new( + Statement::Seconded(first.0), + f.id, + first.1, + &f.signing_context, + &f.key, + ).ok_or(())?; + + let signed_2 = SignedFullStatement::new( + Statement::Seconded(second.0), + f.id, + second.1, + &f.signing_context, + &f.key, + ).ok_or(())?; + + Ok(MisbehaviorReport::DoubleVote(signed_1, signed_2)) } + _ => Err(()), } } } diff --git a/node/subsystem/src/messages.rs b/node/subsystem/src/messages.rs index d957d3e93ede..9e75f05e50eb 100644 --- a/node/subsystem/src/messages.rs +++ b/node/subsystem/src/messages.rs @@ -29,10 +29,10 @@ use polkadot_primitives::{BlockNumber, Hash, Signature}; use polkadot_primitives::parachain::{ AbridgedCandidateReceipt, PoVBlock, ErasureChunk, BackedCandidate, Id as ParaId, SignedAvailabilityBitfield, SigningContext, ValidatorId, ValidationCode, ValidatorIndex, - CoreAssignment, CoreOccupied, HeadData, ValidationResult, CandidateDescriptor, + CoreAssignment, CoreOccupied, 
HeadData, CandidateDescriptor, }; use polkadot_node_primitives::{ - MisbehaviorReport, SignedFullStatement, View, ProtocolId, + MisbehaviorReport, SignedFullStatement, View, ProtocolId, ValidationResult, }; /// A notification of a new backed candidate. @@ -50,8 +50,8 @@ pub enum CandidateSelectionMessage { /// Messages received by the Candidate Backing subsystem. #[derive(Debug)] pub enum CandidateBackingMessage { - /// Registers a stream listener for updates to the set of backable candidates that could be backed - /// in a child of the given relay-parent, referenced by its hash. + /// Requests a set of backable candidates that could be backed in a child of the given + /// relay-parent, referenced by its hash. GetBackedCandidates(Hash, oneshot::Sender>), /// Note that the Candidate Backing subsystem should second the given candidate in the context of the /// given relay-parent (ref. by hash). This candidate must be validated. diff --git a/primitives/src/parachain.rs b/primitives/src/parachain.rs index f7b4daa9d6b0..9321557f1af8 100644 --- a/primitives/src/parachain.rs +++ b/primitives/src/parachain.rs @@ -752,15 +752,6 @@ pub enum CompactStatement { /// A signed compact statement, suitable to be sent to the chain. pub type SignedStatement = Signed; -/// Result of the validation of the candidate. -#[derive(RuntimeDebug)] -pub enum ValidationResult { - /// Candidate is valid. - Valid, - /// Candidate is invalid. - Invalid, -} - /// An either implicit or explicit attestation to the validity of a parachain /// candidate. 
#[derive(Clone, Eq, PartialEq, Decode, Encode, RuntimeDebug)] diff --git a/roadmap/implementors-guide/src/node/backing/candidate-backing.md b/roadmap/implementors-guide/src/node/backing/candidate-backing.md index abf84397fb39..2044fbd8e75b 100644 --- a/roadmap/implementors-guide/src/node/backing/candidate-backing.md +++ b/roadmap/implementors-guide/src/node/backing/candidate-backing.md @@ -18,7 +18,8 @@ Output: - [`RuntimeApiMessage`][RAM] - [`CandidateSelectionMessage`][CSM] - [`ProvisionerMessage`][PM] -- PoV Distribution (Statement Distribution) +- [`PoVDistributionMessage`][PDM] +- [`StatementDistributionMessage`][SDM] ## Functionality @@ -35,8 +36,9 @@ The subsystem should maintain a set of handles to Candidate Backing Jobs that ar ### On Receiving `CandidateBackingMessage` -* If the message is a [`CandidateBackingMessage`][CBM]`::RegisterBackingWatcher`, register the watcher and trigger it each time a new candidate is backable. Also trigger it once initially if there are any backable candidates at the time of receipt. +* If the message is a [`CandidateBackingMessage`][CBM]`::GetBackedCandidates`, get all backable candidates from the statement table and send them back. * If the message is a [`CandidateBackingMessage`][CBM]`::Second`, sign and dispatch a `Seconded` statement only if we have not seconded any other candidate and have not signed a `Valid` statement for the requested candidate. Signing both a `Seconded` and `Valid` message is a double-voting misbehavior with a heavy penalty, and this could occur if another validator has seconded the same candidate and we've received their message before the internal seconding request. +* If the message is a [`CandidateBackingMessage`][CBM]`::Statement`, count the statement to the quorum. If the statement in the message is `Seconded` and it contains a candidate that belongs to our assignment, request the corresponding `PoV` from the `PoVDistribution` and launch validation. 
Issue our own `Valid` or `Invalid` statement as a result. > big TODO: "contextual execution" > @@ -63,15 +65,27 @@ The goal of a Candidate Backing Job is to produce as many backable candidates as ```rust match msg { + CetBackedCandidates(hash, tx) => { + // Send back a set of backable candidates. + } CandidateBackingMessage::Second(hash, candidate) => { if candidate is unknown and in local assignment { spawn_validation_work(candidate, parachain head, validation function) } } + CandidateBackingMessage::Statement(hash, statement) => { + // count to the votes on this candidate + if let Statement::Seconded(candidate) = statement { + if candidate.parachain_id == our_assignment { + spawn_validation_work(candidate, parachain head, validation function) + } + } + } } ``` Add `Seconded` statements and `Valid` statements to a quorum. If quorum reaches validator-group majority, send a [`ProvisionerMessage`][PM]`::ProvisionableData(ProvisionableData::BackedCandidate(BackedCandidate))` message. +`Invalid` statements that conflict with already witnessed `Seconded` and `Valid` statements for the given candidate, statements that are double-votes, self-contradictions and so on, should result in issuing a [`ProvisionerMessage`][PM]`::MisbehaviorReport` message for each newly detected case of this kind. ### Validating Candidates. @@ -94,14 +108,16 @@ fn spawn_validation_work(candidate, parachain head, validation function) { ### Fetch Pov Block Create a `(sender, receiver)` pair. -Dispatch a `PovFetchSubsystemMessage(relay_parent, candidate_hash, sender)` and listen on the receiver for a response. +Dispatch a [`PoVDistributionMessage`][PDM]`::FecthPoV(relay_parent, candidate_hash, sender)` and listen on the receiver for a response. ### Validate PoV Block Create a `(sender, receiver)` pair. Dispatch a `CandidateValidationMessage::Validate(validation function, candidate, pov, sender)` and listen on the receiver for a response. 
-> TODO: send statements to Statement Distribution subsystem, handle shutdown signal from candidate backing subsystem +### Distribute Signed Statemnet + +Dispatch a [`StatementDistributionMessage`][PDM]`::Share(relay_parent, SignedFullStatement)`. [OverseerSignal]: ../../types/overseer-protocol.md#overseer-signal [Statement]: ../../types/backing.md#statement-type @@ -111,6 +127,8 @@ Dispatch a `CandidateValidationMessage::Validate(validation function, candidate, [CVM]: ../../types/overseer-protocol.md#validation-request-type [PM]: ../../types/overseer-protocol.md#provisioner-message [CBM]: ../../types/overseer-protocol.md#candidate-backing-message +[PDM]: ../../types/overseer-protocol.md#pov-distribution-message +[SDM]: ../../types/overseer-protocol.md#statement-distribution-message [CS]: candidate-selection.md [CV]: ../utility/candidate-validation.md diff --git a/roadmap/implementors-guide/src/types/overseer-protocol.md b/roadmap/implementors-guide/src/types/overseer-protocol.md index b10211e527b6..8e4dd1283cf0 100644 --- a/roadmap/implementors-guide/src/types/overseer-protocol.md +++ b/roadmap/implementors-guide/src/types/overseer-protocol.md @@ -86,9 +86,9 @@ enum BitfieldSigningMessage { } ```rust enum CandidateBackingMessage { - /// Registers a stream listener for updates to the set of backable candidates that could be backed - /// in a child of the given relay-parent, referenced by its hash. - RegisterBackingWatcher(Hash, TODO), + /// Requests a set of backable candidates that could be backed in a child of the given + /// relay-parent, referenced by its hash. + GetBackedCandidates(Hash, ResponseChannel>), /// Note that the Candidate Backing subsystem should second the given candidate in the context of the /// given relay-parent (ref. by hash). This candidate must be validated using the provided PoV. 
Second(Hash, CandidateReceipt, PoV), From 63124aa9ebd44d6cd4501b253730ac0a1e156c62 Mon Sep 17 00:00:00 2001 From: Fedor Sakharov Date: Sat, 4 Jul 2020 17:38:41 +0300 Subject: [PATCH 10/18] Adds run_job function --- node/core/backing/src/lib.rs | 316 ++++++++++++++++++----------------- 1 file changed, 159 insertions(+), 157 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 93c1f9edc9ad..681efa755629 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -65,6 +65,7 @@ use statement_table::{ #[derive(Debug, derive_more::From)] enum Error { + NotInValidatorSet, JobNotFound(Hash), InvalidSignature, #[from] @@ -79,21 +80,17 @@ enum Error { /// Holds all data needed for candidate backing job operation. struct CandidateBackingJob { - /// The keystore which holds the signing keys. - keystore: KeyStorePtr, /// The hash of the relay parent on top of which this job is doing it's work. parent: Hash, /// Inbound message channel receiving part. rx_to: mpsc::Receiver, - /// Inbound message sending part. Handed out to whom it may concern. - tx_to: mpsc::Sender, /// Outbound message channel sending part. tx_from: mpsc::Sender, /// The validation codes of the `ParaId`s we are assigned as validators to. - validation_code: ValidationCode, + _validation_code: ValidationCode, /// `HeadData`s of the parachains that this validator is assigned to. - head_data: HeadData, + _head_data: HeadData, /// The `ParaId`s assigned to this validator. assignment: ParaId, /// We issued `Valid` statements on about these candidates. 
@@ -208,152 +205,8 @@ fn signing_key(validators: &[ValidatorId], keystore: &KeyStorePtr) -> Option (Self, mpsc::Receiver) { - let (tx_to, rx_to) = mpsc::channel(CHANNEL_CAPACITY); - let (tx_from, rx_from) = mpsc::channel(CHANNEL_CAPACITY); - - ( - Self { - keystore, - parent, - rx_to, - tx_to, - tx_from, - - validation_code: ValidationCode::default(), - head_data: HeadData::default(), - assignment: ParaId::default(), - - issued_validity: HashSet::default(), - seconded: None, - reported_misbehavior_for: HashSet::default(), - - table: Table::default(), - table_context: TableContext::default(), - }, - rx_from, - ) - } - - /// Hand out the sending part of the channel to communicate with this job. - fn tx(&mut self) -> mpsc::Sender { - self.tx_to.clone() - } - - /// Request a validator set from the `RuntimeApi`. - async fn request_validators(&mut self) -> Result, Error> { - let (tx, rx) = oneshot::channel(); - - self.tx_from.send(FromJob::RuntimeApiMessage(RuntimeApiMessage::Request( - self.parent.clone(), - RuntimeApiRequest::Validators(tx), - ) - )).await?; - - Ok(rx.await?) - } - - /// Request the scheduler roster from `RuntimeApi`. - async fn request_validator_groups(&mut self) -> Result { - let (tx, rx) = oneshot::channel(); - - self.tx_from.send(FromJob::RuntimeApiMessage(RuntimeApiMessage::Request( - self.parent.clone(), - RuntimeApiRequest::ValidatorGroups(tx), - ) - )).await?; - - Ok(rx.await?) - } - - /// Request the validation code for some `ParaId` at given block - /// from `RuntimeApi`. - async fn request_validation_code( - &mut self, - id: ParaId, - parent: BlockNumber, - parablock: Option, - ) -> Result { - let (tx, rx) = oneshot::channel(); - - self.tx_from.send(FromJob::RuntimeApiMessage(RuntimeApiMessage::Request( - self.parent.clone(), - RuntimeApiRequest::ValidationCode(id, parent, parablock, tx), - ) - )).await?; - - Ok(rx.await?) - } - - /// Request a `SigningContext` from the `RuntimeApi`. 
- async fn request_signing_context(&mut self) -> Result { - let (tx, rx) = oneshot::channel(); - - self.tx_from.send(FromJob::RuntimeApiMessage(RuntimeApiMessage::Request( - self.parent.clone(), - RuntimeApiRequest::SigningContext(tx), - ) - )).await?; - - Ok(rx.await?) - } - - /// Request `HeadData` for some `ParaId` from `RuntimeApi`. - async fn request_head_data(&mut self, id: ParaId) -> Result { - let (tx, rx) = oneshot::channel(); - - self.tx_from.send(FromJob::RuntimeApiMessage(RuntimeApiMessage::Request( - self.parent.clone(), - RuntimeApiRequest::HeadData(id, tx), - ) - )).await?; - - Ok(rx.await?) - } - - /// Logic that runs on startup. - async fn on_startup(&mut self) -> Result<(), Error> { - let validators = self.request_validators().await?; - - let roster = self.request_validator_groups().await?; - - self.table_context.key = signing_key(&validators[..], &self.keystore); - - for assignment in roster.scheduled { - if let Some(groups) = roster.validator_groups.get(assignment.group_idx.0 as usize) { - self.table_context.groups.insert( - assignment.para_id, - HashSet::from_iter(groups.iter().copied()), - ); - } - } - - if let Some(ref key) = self.table_context.key { - if let Some(idx) = validators.iter().position(|k| *k == key.public()) { - let idx = idx as u32; - for (para_id, group) in self.table_context.groups.iter() { - if group.contains(&idx) { - self.assignment = *para_id; - break; - } - } - } - } - - self.validation_code = self.request_validation_code(self.assignment, 0, None).await?; - self.head_data = self.request_head_data(self.assignment).await?; - self.table_context.signing_context = self.request_signing_context().await?; - self.table_context.validators = validators; - - Ok(()) - } - /// Run asynchronously. 
async fn run(mut self) -> Result<(), Error> { - self.on_startup().await?; - while let Some(msg) = self.rx_to.next().await { match msg { ToJob::CandidateBacking(msg) => { @@ -665,6 +518,151 @@ struct Jobs { outgoing_msgs: StreamUnordered>, } +async fn run_job( + parent: Hash, + keystore: KeyStorePtr, + rx_to: mpsc::Receiver, + mut tx_from: mpsc::Sender, +) -> Result<(), Error> { + let validators = request_validators(parent, &mut tx_from).await?; + let roster = request_validator_groups(parent, &mut tx_from).await?; + let key = signing_key(&validators[..], &keystore).ok_or(Error::NotInValidatorSet)?; + let mut groups = HashMap::new(); + + for assignment in roster.scheduled { + if let Some(g) = roster.validator_groups.get(assignment.group_idx.0 as usize) { + groups.insert( + assignment.para_id, + HashSet::from_iter(g.iter().copied()), + ); + } + } + + let mut assignment = Default::default(); + + if let Some(idx) = validators.iter().position(|k| *k == key.public()) { + let idx = idx as u32; + for (para_id, group) in groups.iter() { + if group.contains(&idx) { + assignment = *para_id; + break; + } + } + } + + let validation_code = request_validation_code(parent, &mut tx_from, assignment, 0, None).await?; + let head_data = request_head_data(parent, &mut tx_from, assignment).await?; + let signing_context = request_signing_context(parent, &mut tx_from).await?; + + let table_context = TableContext { + signing_context, + key: Some(key), + groups, + validators, + }; + + let job = CandidateBackingJob { + parent, + rx_to, + tx_from, + _validation_code: validation_code, + _head_data: head_data, + assignment, + issued_validity: HashSet::new(), + seconded: None, + reported_misbehavior_for: HashSet::new(), + table: Table::default(), + table_context, + }; + + job.run().await +} + +/// Request a validator set from the `RuntimeApi`. 
+async fn request_validators( + parent: Hash, + s: &mut mpsc::Sender, +) -> Result, Error> { + let (tx, rx) = oneshot::channel(); + + s.send(FromJob::RuntimeApiMessage(RuntimeApiMessage::Request( + parent, + RuntimeApiRequest::Validators(tx), + ) + )).await?; + + Ok(rx.await?) +} + +/// Request the scheduler roster from `RuntimeApi`. +async fn request_validator_groups( + parent: Hash, + s: &mut mpsc::Sender, +) -> Result { + let (tx, rx) = oneshot::channel(); + + s.send(FromJob::RuntimeApiMessage(RuntimeApiMessage::Request( + parent, + RuntimeApiRequest::ValidatorGroups(tx), + ) + )).await?; + + Ok(rx.await?) +} + +/// Request the validation code for some `ParaId` at given block +/// from `RuntimeApi`. +async fn request_validation_code( + parent: Hash, + s: &mut mpsc::Sender, + id: ParaId, + parent_number: BlockNumber, + parablock: Option, +) -> Result { + let (tx, rx) = oneshot::channel(); + + s.send(FromJob::RuntimeApiMessage(RuntimeApiMessage::Request( + parent, + RuntimeApiRequest::ValidationCode(id, parent_number, parablock, tx), + ) + )).await?; + + Ok(rx.await?) +} + +/// Request a `SigningContext` from the `RuntimeApi`. +async fn request_signing_context( + parent: Hash, + s: &mut mpsc::Sender, +) -> Result { + let (tx, rx) = oneshot::channel(); + + s.send(FromJob::RuntimeApiMessage(RuntimeApiMessage::Request( + parent, + RuntimeApiRequest::SigningContext(tx), + ) + )).await?; + + Ok(rx.await?) +} + +/// Request `HeadData` for some `ParaId` from `RuntimeApi`. +async fn request_head_data( + parent: Hash, + s: &mut mpsc::Sender, + id: ParaId, +) -> Result { + let (tx, rx) = oneshot::channel(); + + s.send(FromJob::RuntimeApiMessage(RuntimeApiMessage::Request( + parent, + RuntimeApiRequest::HeadData(id, tx), + ) + )).await?; + + Ok(rx.await?) 
+} + impl Jobs { fn new(spawner: S) -> Self { Self { @@ -675,15 +673,19 @@ impl Jobs { } fn spawn_job(&mut self, parent_hash: Hash, keystore: KeyStorePtr) -> Result<(), Error> { - let (mut job, from_job) = CandidateBackingJob::new(parent_hash,keystore); - - let to_job = job.tx(); + let (to_job_tx, to_job_rx) = mpsc::channel(CHANNEL_CAPACITY); + let (from_job_tx, from_job_rx) = mpsc::channel(CHANNEL_CAPACITY); let (future, abort_handle) = future::abortable(async move { - if let Err(e) = job.run().await { - log::error!("Job terminated with an an error: {:?}", e); + if let Err(e) = run_job(parent_hash, keystore, to_job_rx, from_job_tx).await { + log::error!( + "CandidateBackingJob({}) finished with an error {:?}", + parent_hash, + e, + ); } }); + let (finished_tx, finished) = oneshot::channel(); let future = async move { @@ -692,11 +694,11 @@ impl Jobs { }; self.spawner.spawn(future)?; - let su_handle = self.outgoing_msgs.push(from_job); + let su_handle = self.outgoing_msgs.push(from_job_rx); let handle = JobHandle { abort_handle, - to_job, + to_job: to_job_tx, finished, su_handle, }; From 1b744ed2b11ea372be9d135b5a3a723931733b8f Mon Sep 17 00:00:00 2001 From: Fedor Sakharov Date: Sat, 4 Jul 2020 18:11:10 +0300 Subject: [PATCH 11/18] Some comments and refactorings --- node/core/backing/src/lib.rs | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 681efa755629..009d7aba998a 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -734,6 +734,7 @@ impl Jobs { } } +/// An implementation of the Candidate Backing subsystem. pub struct CandidateBackingSubsystem { spawner: S, keystore: KeyStorePtr, @@ -745,6 +746,7 @@ impl CandidateBackingSubsystem S: Spawn + Clone, Context: SubsystemContext, { + /// Creates a new `CandidateBackingSubsystem`. 
pub fn new(keystore: KeyStorePtr, spawner: S) -> Self { Self { spawner, @@ -974,17 +976,19 @@ mod tests { test_state: &TestState, ) { // Start work on some new parent. - virtual_overseer.send(FromOverseer::Signal(OverseerSignal::StartWork(test_state.relay_parent))).await; + virtual_overseer.send(FromOverseer::Signal( + OverseerSignal::StartWork(test_state.relay_parent)) + ).await; // Check that subsystem job issues a request for a validator set. - match virtual_overseer.recv().await { + assert_matches!( + virtual_overseer.recv().await, AllMessages::RuntimeApi( RuntimeApiMessage::Request(parent, RuntimeApiRequest::Validators(tx)) ) if parent == test_state.relay_parent => { tx.send(test_state.validator_public.clone()).unwrap(); } - msg => panic!("unexpected message {:?}", msg), - } + ); // Check that subsystem job issues a request for the validator groups. assert_matches!( From f52aa09e7653aa17e8cdf004ba78a35dec5d2074 Mon Sep 17 00:00:00 2001 From: Fedor Sakharov Date: Tue, 7 Jul 2020 22:32:29 +0300 Subject: [PATCH 12/18] Fix review --- Cargo.lock | 1 + node/core/backing/Cargo.toml | 1 + node/core/backing/src/lib.rs | 353 +++++++++++++++------- node/overseer/examples/minimal-example.rs | 1 + node/overseer/src/lib.rs | 1 + node/primitives/src/lib.rs | 2 +- node/subsystem/src/messages.rs | 8 +- primitives/src/parachain.rs | 13 +- 8 files changed, 276 insertions(+), 104 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ed4f9d55f802..ae019dd9050b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4426,6 +4426,7 @@ dependencies = [ "futures 0.3.5", "futures-timer 3.0.2", "log 0.4.8", + "polkadot-erasure-coding", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-primitives", diff --git a/node/core/backing/Cargo.toml b/node/core/backing/Cargo.toml index bf859e327427..b25055293add 100644 --- a/node/core/backing/Cargo.toml +++ b/node/core/backing/Cargo.toml @@ -16,6 +16,7 @@ primitives = { package = "sp-core", git = "https://github.com/paritytech/substra 
polkadot-primitives = { path = "../../../primitives" } polkadot-node-primitives = { path = "../../primitives" } polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" } +erasure-coding = { package = "polkadot-erasure-coding", path = "../../../erasure-coding" } statement-table = { package = "polkadot-statement-table", path = "../../../statement-table" } futures-timer = "3.0.2" streamunordered = "0.5.1" diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 009d7aba998a..af04efb444a5 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -38,11 +38,12 @@ use streamunordered::{StreamUnordered, StreamYield}; use primitives::Pair; use keystore::KeyStorePtr; use polkadot_primitives::{ - Hash, BlockNumber, + Hash, parachain::{ AbridgedCandidateReceipt, BackedCandidate, Id as ParaId, ValidatorPair, ValidatorId, - ValidatorIndex, HeadData, ValidationCode, SigningContext, PoVBlock, - CandidateDescriptor, + ValidatorIndex, HeadData, SigningContext, PoVBlock, OmittedValidationData, + CandidateDescriptor, LocalValidationData, GlobalValidationSchedule, AvailableData, + ErasureChunk, }, }; use polkadot_node_primitives::{ @@ -55,20 +56,23 @@ use polkadot_subsystem::messages::{ AllMessages, CandidateBackingMessage, CandidateSelectionMessage, SchedulerRoster, RuntimeApiMessage, RuntimeApiRequest, CandidateValidationMessage, ValidationFailed, StatementDistributionMessage, NewBackedCandidate, ProvisionerMessage, ProvisionableData, - PoVDistributionMessage, + PoVDistributionMessage, AvailabilityStoreMessage, }; use statement_table::{ generic::AttestedCandidate as TableAttestedCandidate, Table, Context as TableContextTrait, Statement as TableStatement, - SignedStatement as TableSignedStatement, + SignedStatement as TableSignedStatement, Summary as TableSummary, }; #[derive(Debug, derive_more::From)] enum Error { NotInValidatorSet, + LocalValidationDataMissing, JobNotFound(Hash), InvalidSignature, #[from] + 
Erasure(erasure_coding::Error), + #[from] ValidationFailed(ValidationFailed), #[from] Oneshot(oneshot::Canceled), @@ -87,10 +91,12 @@ struct CandidateBackingJob { /// Outbound message channel sending part. tx_from: mpsc::Sender, - /// The validation codes of the `ParaId`s we are assigned as validators to. - _validation_code: ValidationCode, /// `HeadData`s of the parachains that this validator is assigned to. - _head_data: HeadData, + head_data: HeadData, + /// Global validation schedule. + global_validation_schedule: GlobalValidationSchedule, + /// Local validation data of the parachain this validator is assigned to. + local_validation_data: LocalValidationData, /// The `ParaId`s assigned to this validator. assignment: ParaId, /// We issued `Valid` statements on about these candidates. @@ -105,22 +111,20 @@ struct CandidateBackingJob { } const fn group_quorum(n_validators: usize) -> usize { - let mut threshold = (n_validators) / 2; - threshold += 1; - threshold + (n_validators / 2) + 1 } #[derive(Default)] struct TableContext { signing_context: SigningContext, key: Option, - groups: HashMap>, + groups: HashMap>, validators: Vec, } impl TableContextTrait for TableContext { fn is_member_of(&self, authority: ValidatorIndex, group: &ParaId) -> bool { - self.groups.get(group).map_or(false, |g| g.get(&authority).is_some()) + self.groups.get(group).map_or(false, |g| g.iter().position(|&a| a == authority).is_some()) } fn requisite_votes(&self, group: &ParaId) -> usize { @@ -156,6 +160,7 @@ enum ToJob { /// A message type that is sent from `CandidateBackingJob` to `CandidateBackingSubsystem`. 
enum FromJob { + AvailabilityStore(AvailabilityStoreMessage), RuntimeApiMessage(RuntimeApiMessage), CandidateValidation(CandidateValidationMessage), CandidateSelection(CandidateSelectionMessage), @@ -167,6 +172,7 @@ enum FromJob { impl From for AllMessages { fn from(f: FromJob) -> Self { match f { + FromJob::AvailabilityStore(msg) => AllMessages::AvailabilityStore(msg), FromJob::RuntimeApiMessage(msg) => AllMessages::RuntimeApi(msg), FromJob::CandidateValidation(msg) => AllMessages::CandidateValidation(msg), FromJob::CandidateSelection(msg) => AllMessages::CandidateSelection(msg), @@ -201,7 +207,6 @@ fn signing_key(validators: &[ValidatorId], keystore: &KeyStorePtr) -> Option(&v).ok() }) - .map(|pair| pair) } impl CandidateBackingJob { @@ -224,7 +229,7 @@ impl CandidateBackingJob { candidate: AbridgedCandidateReceipt, ) -> Result<(), Error> { self.tx_from.send(FromJob::CandidateSelection( - CandidateSelectionMessage::Invalid(self.parent, candidate) + CandidateSelectionMessage::Invalid(self.parent, candidate) )).await?; Ok(()) @@ -236,12 +241,14 @@ impl CandidateBackingJob { candidate: AbridgedCandidateReceipt, pov: PoVBlock, ) -> Result { - let valid = self.request_candidate_validation(candidate.clone(), pov).await?; + let valid = self.request_candidate_validation(candidate.clone(), pov.clone()).await?; let statement = match valid { ValidationResult::Valid => { // make PoV available for later distribution. Send data to the availability // store to keep. Sign and dispatch `valid` statement to network if we // have not seconded the given candidate. 
+ self.make_pov_available(pov).await?; + self.issued_validity.insert(candidate.hash()); Statement::Seconded(candidate) } ValidationResult::Invalid => { @@ -252,7 +259,7 @@ impl CandidateBackingJob { }; if let Some(signed_statement) = self.sign_statement(statement) { - self.import_statement(signed_statement.clone()).await?; + self.import_statement(&signed_statement).await?; self.distribute_signed_statement(signed_statement).await?; } @@ -271,14 +278,21 @@ impl CandidateBackingJob { .map(|(id, vote)| (id, vote.into())) .unzip(); + let group = match self.table_context.groups.get(&self.assignment) { + Some(group) => group, + None => continue, + }; + let mut validator_indices = BitVec::with_capacity( - self.table_context.validators.len(), + group.len() ); - validator_indices.resize(self.table_context.validators.len(), false); + validator_indices.resize(group.len(), false); for id in ids.iter() { - validator_indices.set(*id as usize, true); + if let Some(position) = group.iter().position(|x| x == id) { + validator_indices.set(position, true); + } } let backed = BackedCandidate { @@ -293,6 +307,7 @@ impl CandidateBackingJob { res } + /// Check if there have happened any new misbehaviors and issue necessary messages. async fn issue_new_misbehaviors(&mut self) -> Result<(), Error> { let mut reports = Vec::new(); @@ -324,28 +339,18 @@ impl CandidateBackingJob { Ok(()) } - async fn import_statement(&mut self, statement: SignedFullStatement) -> Result<(), Error> { - let idx = statement.validator_index() as usize; - - if self.table_context.validators.len() > idx { - match statement.check_signature( - &self.table_context.signing_context, - &self.table_context.validators[idx], - ) { - Ok(()) => { - let stmt = primitive_statement_to_table(&statement); + /// Import a statement into the statement table and return the summary of the import. 
+ async fn import_statement( + &mut self, + statement: &SignedFullStatement, + ) -> Result, Error> { + let stmt = primitive_statement_to_table(statement); - self.table.import_statement(&self.table_context, stmt); + let summary = self.table.import_statement(&self.table_context, stmt); - self.issue_new_misbehaviors().await?; - } - Err(()) => { - return Err(Error::ValidationFailed(ValidationFailed)); - } - } - }; + self.issue_new_misbehaviors().await?; - Ok(()) + return Ok(summary); } async fn process_msg(&mut self, msg: CandidateBackingMessage) -> Result<(), Error> { @@ -373,6 +378,7 @@ impl CandidateBackingJob { } } CandidateBackingMessage::Statement(_, statement) => { + self.check_statement_signature(&statement)?; self.maybe_validate_and_import(statement).await?; } CandidateBackingMessage::GetBackedCandidates(_, tx) => { @@ -385,41 +391,44 @@ impl CandidateBackingJob { Ok(()) } - async fn maybe_validate_and_import( + /// Kick off validation work and distribute the result as a signed statement. 
+ async fn kick_off_validation_work( &mut self, - statement: SignedFullStatement, + summary: TableSummary, + candidate: AbridgedCandidateReceipt, ) -> Result<(), Error> { - let idx = statement.validator_index() as usize; + let pov = self.request_pov_from_distribution(candidate.to_descriptor()).await?; + let v = self.request_candidate_validation(candidate.clone(), pov).await?; - if self.table_context.validators.len() > idx { - if let Err(()) = statement.check_signature( - &self.table_context.signing_context, - &self.table_context.validators[idx], - ) { - return Err(Error::InvalidSignature); + let statement = match v { + ValidationResult::Valid => { + let candidate_hash = candidate.hash(); + self.issued_validity.insert(candidate_hash); + Statement::Valid(candidate_hash) } - } else { - return Err(Error::InvalidSignature); - } + ValidationResult::Invalid => Statement::Invalid(candidate.hash()), + }; - if let Statement::Seconded(candidate) = statement.payload() { - if candidate.parachain_index == self.assignment { - let pov = self.request_pov_from_distribution(candidate.clone().into()).await?; - let v = self.request_candidate_validation(candidate.clone(), pov).await?; + if let Some(signed_statement) = self.sign_statement(statement) { + self.distribute_signed_statement(signed_statement).await?; + } - let statement = match v { - ValidationResult::Valid => Statement::Valid(candidate.hash()), - ValidationResult::Invalid => Statement::Invalid(candidate.hash()), - }; + Ok(()) + } - if let Some(signed_statement) = self.sign_statement(statement) { - self.distribute_signed_statement(signed_statement).await?; + /// Import the statement and kick off validation work if it is a part of our assignment. + async fn maybe_validate_and_import( + &mut self, + statement: SignedFullStatement, + ) -> Result<(), Error> { + if let Some(summary) = self.import_statement(&statement).await? 
{ + if let Statement::Seconded(candidate) = statement.into_payload() { + if summary.group_id == self.assignment { + self.kick_off_validation_work(summary, candidate).await?; } } } - self.import_statement(statement).await?; - Ok(()) } @@ -438,6 +447,21 @@ impl CandidateBackingJob { Some(signed_statement) } + fn check_statement_signature(&self, statement: &SignedFullStatement) -> Result<(), Error> { + let idx = statement.validator_index() as usize; + + if self.table_context.validators.len() > idx { + statement.check_signature( + &self.table_context.signing_context, + &self.table_context.validators[idx], + ).map_err(|_| Error::InvalidSignature)?; + } else { + return Err(Error::InvalidSignature); + } + + Ok(()) + } + async fn send_to_provisioner(&mut self, msg: ProvisionerMessage) -> Result<(), Error> { self.tx_from.send(FromJob::Provisioner(msg)).await?; @@ -468,6 +492,7 @@ impl CandidateBackingJob { CandidateValidationMessage::Validate( self.parent, candidate, + self.head_data.clone(), pov, tx, ) @@ -477,6 +502,53 @@ impl CandidateBackingJob { Ok(rx.await??) 
} + async fn store_chunk( + &mut self, + id: ValidatorIndex, + chunk: ErasureChunk, + ) -> Result<(), Error> { + self.tx_from.send(FromJob::AvailabilityStore( + AvailabilityStoreMessage::StoreChunk(self.parent, id, chunk) + ) + ).await?; + + Ok(()) + } + + async fn make_pov_available( + &mut self, + pov_block: PoVBlock, + ) -> Result<(), Error> { + let omitted_validation = OmittedValidationData { + global_validation: self.global_validation_schedule.clone(), + local_validation: self.local_validation_data.clone(), + }; + + let available_data = AvailableData { + pov_block, + omitted_validation, + }; + + let chunks = erasure_coding::obtain_chunks( + self.table_context.validators.len(), + &available_data, + )?; + + let branches = erasure_coding::branches(chunks.as_ref()); + + for (index, (chunk, proof)) in chunks.iter().zip(branches.map(|(proof, _)| proof)).enumerate() { + let chunk = ErasureChunk { + chunk: chunk.clone(), + index: index as u32, + proof, + }; + + self.store_chunk(index as ValidatorIndex, chunk).await?; + } + + Ok(()) + } + async fn distribute_signed_statement(&mut self, s: SignedFullStatement) -> Result<(), Error> { let smsg = StatementDistributionMessage::Share(self.parent, s); @@ -533,7 +605,7 @@ async fn run_job( if let Some(g) = roster.validator_groups.get(assignment.group_idx.0 as usize) { groups.insert( assignment.para_id, - HashSet::from_iter(g.iter().copied()), + g.clone(), ); } } @@ -550,10 +622,12 @@ async fn run_job( } } - let validation_code = request_validation_code(parent, &mut tx_from, assignment, 0, None).await?; let head_data = request_head_data(parent, &mut tx_from, assignment).await?; let signing_context = request_signing_context(parent, &mut tx_from).await?; + let local_validation_data = request_local_validation_data(parent, assignment, &mut tx_from).await?; + let global_validation_schedule = request_global_validation_schedule(parent, &mut tx_from).await?; + let table_context = TableContext { signing_context, key: Some(key), @@ 
-565,8 +639,9 @@ async fn run_job( parent, rx_to, tx_from, - _validation_code: validation_code, - _head_data: head_data, + head_data, + global_validation_schedule, + local_validation_data, assignment, issued_validity: HashSet::new(), seconded: None, @@ -578,52 +653,66 @@ async fn run_job( job.run().await } -/// Request a validator set from the `RuntimeApi`. -async fn request_validators( +/// Request a `GlobalValidationSchedule` from `RuntimeApi`. +async fn request_global_validation_schedule( parent: Hash, s: &mut mpsc::Sender, -) -> Result, Error> { +) -> Result { let (tx, rx) = oneshot::channel(); s.send(FromJob::RuntimeApiMessage(RuntimeApiMessage::Request( - parent, - RuntimeApiRequest::Validators(tx), - ) + parent, + RuntimeApiRequest::GlobalValidationSchedule(tx), + ) )).await?; Ok(rx.await?) } -/// Request the scheduler roster from `RuntimeApi`. -async fn request_validator_groups( + +/// Request a `LocalValidationData` from `RuntimeApi`. +async fn request_local_validation_data( parent: Hash, + para_id: ParaId, s: &mut mpsc::Sender, -) -> Result { +) -> Result { + let (tx, rx) = oneshot::channel(); + + s.send(FromJob::RuntimeApiMessage(RuntimeApiMessage::Request( + parent, + RuntimeApiRequest::LocalValidationData(para_id, tx), + ) + )).await?; + + Ok(rx.await?.ok_or(Error::LocalValidationDataMissing)?) +} + +/// Request a validator set from the `RuntimeApi`. +async fn request_validators( + parent: Hash, + s: &mut mpsc::Sender, +) -> Result, Error> { let (tx, rx) = oneshot::channel(); s.send(FromJob::RuntimeApiMessage(RuntimeApiMessage::Request( parent, - RuntimeApiRequest::ValidatorGroups(tx), + RuntimeApiRequest::Validators(tx), ) )).await?; Ok(rx.await?) } -/// Request the validation code for some `ParaId` at given block -/// from `RuntimeApi`. -async fn request_validation_code( +/// Request the scheduler roster from `RuntimeApi`. 
+async fn request_validator_groups( parent: Hash, s: &mut mpsc::Sender, - id: ParaId, - parent_number: BlockNumber, - parablock: Option, -) -> Result { +) -> Result { let (tx, rx) = oneshot::channel(); s.send(FromJob::RuntimeApiMessage(RuntimeApiMessage::Request( parent, - RuntimeApiRequest::ValidationCode(id, parent_number, parablock, tx), + RuntimeApiRequest::ValidatorGroups(tx), ) )).await?; @@ -855,8 +944,9 @@ mod tests { keystore: KeyStorePtr, validators: Vec, validator_public: Vec, + global_validation_schedule: GlobalValidationSchedule, + local_validation_data: LocalValidationData, roster: SchedulerRoster, - validation_code: HashMap, head_data: HashMap, signing_context: SigningContext, relay_parent: Hash, @@ -908,7 +998,7 @@ mod tests { group_idx: GroupIndex::from(2), }; - let validator_groups = vec![vec![0, 2], vec![1, 3], vec![4]]; + let validator_groups = vec![vec![2, 0, 3], vec![1], vec![4]]; let parent_hash_1 = [1; 32].into(); @@ -930,19 +1020,29 @@ mod tests { let mut head_data = HashMap::new(); head_data.insert(chain_a, HeadData(vec![4, 5, 6])); - let mut validation_code = HashMap::new(); - validation_code.insert(chain_a, ValidationCode(vec![1, 2, 3])); - let relay_parent = Hash::from([5; 32]); + let local_validation_data = LocalValidationData { + parent_head: HeadData(vec![7, 8, 9]), + balance: Default::default(), + code_upgrade_allowed: None, + }; + + let global_validation_schedule = GlobalValidationSchedule { + max_code_size: 1000, + max_head_data_size: 1000, + block_number: Default::default(), + }; + Self { chain_ids, keystore, validators, validator_public, roster, - validation_code, head_data, + local_validation_data, + global_validation_schedule, signing_context, relay_parent, } @@ -1000,33 +1100,46 @@ mod tests { } ); - // Check that subsystem job issues a request for the validation code. + // Check that subsystem job issues a request for the head data. 
assert_matches!( virtual_overseer.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::ValidationCode(id, _, _, tx)) + RuntimeApiMessage::Request(parent, RuntimeApiRequest::HeadData(id, tx)) ) if parent == test_state.relay_parent => { - tx.send(test_state.validation_code.get(&id).unwrap().clone()).unwrap(); + tx.send(test_state.head_data.get(&id).unwrap().clone()).unwrap(); } ); - // Check that subsystem job issues a request for the head data. + // Check that subsystem job issues a request for the signing context. assert_matches!( virtual_overseer.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::HeadData(id, tx)) + RuntimeApiMessage::Request(parent, RuntimeApiRequest::SigningContext(tx)) ) if parent == test_state.relay_parent => { - tx.send(test_state.head_data.get(&id).unwrap().clone()).unwrap(); + tx.send(test_state.signing_context.clone()).unwrap(); } ); - // Check that subsystem job issues a request for the signing context. + // Check that subsystem job requests a local validation data for assignment. assert_matches!( virtual_overseer.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::SigningContext(tx)) + RuntimeApiMessage::Request(parent, RuntimeApiRequest::LocalValidationData(_id, tx)) ) if parent == test_state.relay_parent => { - tx.send(test_state.signing_context.clone()).unwrap(); + tx.send(Some(test_state.local_validation_data.clone())).unwrap(); + } + ); + + // Check that subsystem job requests a global validation schedule for assignment. 
+ assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request( + parent, + RuntimeApiRequest::GlobalValidationSchedule(tx), + ) + ) if parent == test_state.relay_parent => { + tx.send(test_state.global_validation_schedule.clone()).unwrap(); } ); } @@ -1061,21 +1174,34 @@ mod tests { virtual_overseer.send(FromOverseer::Communication{ msg: second }).await; + let expected_head_data = test_state.head_data.get(&test_state.chain_ids[0]).unwrap(); + assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( CandidateValidationMessage::Validate( parent_hash, c, + head_data, pov, tx, ) ) if parent_hash == test_state.relay_parent && pov == pov_block && c == candidate => { + assert_eq!(head_data, *expected_head_data); tx.send(Ok(ValidationResult::Valid)).unwrap(); } ); + for _ in 0..test_state.validators.len() { + assert_matches!( + virtual_overseer.recv().await, + AllMessages::AvailabilityStore( + AvailabilityStoreMessage::StoreChunk(parent_hash, _, _) + ) if parent_hash == test_state.relay_parent + ); + } + assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -1151,6 +1277,8 @@ mod tests { } ); + let expected_head_data = test_state.head_data.get(&test_state.chain_ids[0]).unwrap(); + // The next step is the actual request to Validation subsystem // to validate the `Seconded` candidate. 
assert_matches!( @@ -1159,10 +1287,12 @@ mod tests { CandidateValidationMessage::Validate( relay_parent, candidate, + head_data, pov, tx, ) ) if relay_parent == test_state.relay_parent && candidate == candidate_a => { + assert_eq!(head_data, *expected_head_data); assert_eq!(pov, pov_block); tx.send(Ok(ValidationResult::Valid)).unwrap(); } @@ -1196,7 +1326,7 @@ mod tests { assert!(backed[0].0.validity_votes.contains( &ValidityAttestation::Implicit(signed_a.signature().clone()) )); - assert_eq!(backed[0].0.validator_indices, bitvec::bitvec![Lsb0, u8; 1, 0, 1, 0, 0]); + assert_eq!(backed[0].0.validator_indices, bitvec::bitvec![Lsb0, u8; 1, 1, 0]); virtual_overseer.send(FromOverseer::Signal( OverseerSignal::StopWork(test_state.relay_parent)) @@ -1262,17 +1392,21 @@ mod tests { } ); + let expected_head_data = test_state.head_data.get(&test_state.chain_ids[0]).unwrap(); + assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( CandidateValidationMessage::Validate( relay_parent, candidate, + head_data, pov, tx, ) ) if relay_parent == test_state.relay_parent && candidate == candidate_a => { assert_eq!(pov, pov_block); + assert_eq!(head_data, *expected_head_data); tx.send(Ok(ValidationResult::Valid)).unwrap(); } ); @@ -1372,17 +1506,21 @@ mod tests { virtual_overseer.send(FromOverseer::Communication{ msg: second }).await; + let expected_head_data = test_state.head_data.get(&test_state.chain_ids[0]).unwrap(); + assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( CandidateValidationMessage::Validate( parent_hash, c, + head_data, pov, tx, ) ) if parent_hash == test_state.relay_parent && pov == pov_block_a && c == candidate_a => { + assert_eq!(head_data, *expected_head_data); tx.send(Ok(ValidationResult::Invalid)).unwrap(); } ); @@ -1414,21 +1552,34 @@ mod tests { virtual_overseer.send(FromOverseer::Communication{ msg: second }).await; + let expected_head_data = test_state.head_data.get(&test_state.chain_ids[0]).unwrap(); 
+ assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( CandidateValidationMessage::Validate( parent_hash, c, + head_data, pov, tx, ) ) if parent_hash == test_state.relay_parent && pov == pov_block_b && c == candidate_b => { + assert_eq!(head_data, *expected_head_data); tx.send(Ok(ValidationResult::Valid)).unwrap(); } ); + for _ in 0..test_state.validators.len() { + assert_matches!( + virtual_overseer.recv().await, + AllMessages::AvailabilityStore( + AvailabilityStoreMessage::StoreChunk(parent_hash, _, _) + ) if parent_hash == test_state.relay_parent + ); + } + assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( diff --git a/node/overseer/examples/minimal-example.rs b/node/overseer/examples/minimal-example.rs index cdef0340d04f..aefb627f7f56 100644 --- a/node/overseer/examples/minimal-example.rs +++ b/node/overseer/examples/minimal-example.rs @@ -59,6 +59,7 @@ impl Subsystem1 { ctx.send_message(AllMessages::CandidateValidation( CandidateValidationMessage::Validate( + Default::default(), Default::default(), Default::default(), PoVBlock { diff --git a/node/overseer/src/lib.rs b/node/overseer/src/lib.rs index 706ba58a5e22..bbbb2d88a74f 100644 --- a/node/overseer/src/lib.rs +++ b/node/overseer/src/lib.rs @@ -726,6 +726,7 @@ mod tests { ctx.send_message( AllMessages::CandidateValidation( CandidateValidationMessage::Validate( + Default::default(), Default::default(), Default::default(), PoVBlock { diff --git a/node/primitives/src/lib.rs b/node/primitives/src/lib.rs index 9a95723026e6..25313c09154e 100644 --- a/node/primitives/src/lib.rs +++ b/node/primitives/src/lib.rs @@ -79,7 +79,7 @@ pub enum MisbehaviorReport { /// These validator nodes disagree on this candidate's validity, please figure it out /// /// Most likely, the list of statments all agree except for the final one. 
That's not - /// guarjnteed, though; if somehow we become aware of lots of + /// guaranteed, though; if somehow we become aware of lots of /// statements disagreeing about the validity of a candidate before taking action, /// this message should be dispatched with all of them, in arbitrary order. /// diff --git a/node/subsystem/src/messages.rs b/node/subsystem/src/messages.rs index 0423d247704e..50c0b60752e3 100644 --- a/node/subsystem/src/messages.rs +++ b/node/subsystem/src/messages.rs @@ -28,7 +28,8 @@ use polkadot_primitives::{BlockNumber, Hash, Signature}; use polkadot_primitives::parachain::{ AbridgedCandidateReceipt, PoVBlock, ErasureChunk, BackedCandidate, Id as ParaId, SignedAvailabilityBitfield, SigningContext, ValidatorId, ValidationCode, ValidatorIndex, - CoreAssignment, CoreOccupied, HeadData, CandidateDescriptor, + CoreAssignment, CoreOccupied, HeadData, CandidateDescriptor, GlobalValidationSchedule, + LocalValidationData, }; use polkadot_node_primitives::{ MisbehaviorReport, SignedFullStatement, View, ProtocolId, ValidationResult, @@ -76,6 +77,7 @@ pub enum CandidateValidationMessage { Validate( Hash, AbridgedCandidateReceipt, + HeadData, PoVBlock, oneshot::Sender>, ), @@ -177,6 +179,10 @@ pub enum RuntimeApiRequest { ValidationCode(ParaId, BlockNumber, Option, oneshot::Sender), /// Get head data for a specific para. HeadData(ParaId, oneshot::Sender), + /// Get the local validation data. + LocalValidationData(ParaId, oneshot::Sender>), + /// Get the global validation schedule. + GlobalValidationSchedule(oneshot::Sender), } /// A message to the Runtime API subsystem. diff --git a/primitives/src/parachain.rs b/primitives/src/parachain.rs index 03ba77589840..54a18ed087b0 100644 --- a/primitives/src/parachain.rs +++ b/primitives/src/parachain.rs @@ -600,8 +600,19 @@ impl AbridgedCandidateReceipt { pov_block_hash: *pov_block_hash, } } -} + + /// Clone the relevant portions of the `AbridgedCandidateReceipt` to form a `CandidateDescriptor`. 
+ pub fn to_descriptor(&self) -> CandidateDescriptor { + CandidateDescriptor { + parachain_index: self.parachain_index, + relay_parent: self.relay_parent, + collator: self.collator.clone(), + signature: self.signature.clone(), + pov_block_hash: self.pov_block_hash.clone(), + } + } +} impl PartialOrd for AbridgedCandidateReceipt { fn partial_cmp(&self, other: &Self) -> Option { From e4aed7cc5eead47ea31e5dd7100595099f3e260e Mon Sep 17 00:00:00 2001 From: Fedor Sakharov Date: Tue, 7 Jul 2020 23:06:04 +0300 Subject: [PATCH 13/18] Remove warnings --- node/core/backing/src/lib.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index af04efb444a5..2a53ea4b10ba 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -20,7 +20,6 @@ use std::collections::{HashMap, HashSet}; use std::convert::TryFrom; -use std::iter::FromIterator; use std::pin::Pin; use std::time::Duration; @@ -394,7 +393,6 @@ impl CandidateBackingJob { /// Kick off validation work and distribute the result as a signed statement. async fn kick_off_validation_work( &mut self, - summary: TableSummary, candidate: AbridgedCandidateReceipt, ) -> Result<(), Error> { let pov = self.request_pov_from_distribution(candidate.to_descriptor()).await?; @@ -424,7 +422,7 @@ impl CandidateBackingJob { if let Some(summary) = self.import_statement(&statement).await? 
{ if let Statement::Seconded(candidate) = statement.into_payload() { if summary.group_id == self.assignment { - self.kick_off_validation_work(summary, candidate).await?; + self.kick_off_validation_work(candidate).await?; } } } From 05d7dc69b23b8b387cb7a38e4efcd2c3e342beae Mon Sep 17 00:00:00 2001 From: Fedor Sakharov Date: Tue, 7 Jul 2020 23:21:55 +0300 Subject: [PATCH 14/18] Use summary in kicking off validation --- node/core/backing/src/lib.rs | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 2a53ea4b10ba..c5cfd3478f3d 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -66,6 +66,7 @@ use statement_table::{ #[derive(Debug, derive_more::From)] enum Error { NotInValidatorSet, + CandidateNotFound, LocalValidationDataMissing, JobNotFound(Hash), InvalidSignature, @@ -393,25 +394,28 @@ impl CandidateBackingJob { /// Kick off validation work and distribute the result as a signed statement. 
async fn kick_off_validation_work( &mut self, - candidate: AbridgedCandidateReceipt, - ) -> Result<(), Error> { - let pov = self.request_pov_from_distribution(candidate.to_descriptor()).await?; - let v = self.request_candidate_validation(candidate.clone(), pov).await?; + summary: TableSummary, + ) -> Result { + let candidate = self.table.get_candidate(&summary.candidate).ok_or(Error::CandidateNotFound)?; + let candidate = candidate.clone(); + let descriptor = candidate.to_descriptor(); + let candidate_hash = candidate.hash(); + let pov = self.request_pov_from_distribution(descriptor).await?; + let v = self.request_candidate_validation(candidate, pov).await?; let statement = match v { ValidationResult::Valid => { - let candidate_hash = candidate.hash(); self.issued_validity.insert(candidate_hash); Statement::Valid(candidate_hash) } - ValidationResult::Invalid => Statement::Invalid(candidate.hash()), + ValidationResult::Invalid => Statement::Invalid(candidate_hash), }; if let Some(signed_statement) = self.sign_statement(statement) { self.distribute_signed_statement(signed_statement).await?; } - Ok(()) + Ok(v) } /// Import the statement and kick off validation work if it is a part of our assignment. @@ -420,9 +424,9 @@ impl CandidateBackingJob { statement: SignedFullStatement, ) -> Result<(), Error> { if let Some(summary) = self.import_statement(&statement).await? 
{ - if let Statement::Seconded(candidate) = statement.into_payload() { + if let Statement::Seconded(_) = statement.payload() { if summary.group_id == self.assignment { - self.kick_off_validation_work(candidate).await?; + self.kick_off_validation_work(summary).await?; } } } From 994eefde86961451d58e81b55df8305d642c39c6 Mon Sep 17 00:00:00 2001 From: Fedor Sakharov Date: Wed, 8 Jul 2020 12:16:01 +0300 Subject: [PATCH 15/18] Parallelize requests --- node/core/backing/src/lib.rs | 48 ++++++++++++++++++++++-------------- 1 file changed, 29 insertions(+), 19 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index c5cfd3478f3d..8885802553d5 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -598,8 +598,11 @@ async fn run_job( rx_to: mpsc::Receiver, mut tx_from: mpsc::Sender, ) -> Result<(), Error> { - let validators = request_validators(parent, &mut tx_from).await?; - let roster = request_validator_groups(parent, &mut tx_from).await?; + let (validators, roster) = futures::try_join!( + request_validators(parent, &mut tx_from).await?, + request_validator_groups(parent, &mut tx_from).await?, + )?; + let key = signing_key(&validators[..], &keystore).ok_or(Error::NotInValidatorSet)?; let mut groups = HashMap::new(); @@ -624,11 +627,19 @@ async fn run_job( } } - let head_data = request_head_data(parent, &mut tx_from, assignment).await?; - let signing_context = request_signing_context(parent, &mut tx_from).await?; + let ( + head_data, + signing_context, + local_validation_data, + global_validation_schedule, + ) = futures::try_join!( + request_head_data(parent, &mut tx_from, assignment).await?, + request_signing_context(parent, &mut tx_from).await?, + request_local_validation_data(parent, assignment, &mut tx_from).await?, + request_global_validation_schedule(parent, &mut tx_from).await?, + )?; - let local_validation_data = request_local_validation_data(parent, assignment, &mut tx_from).await?; - let 
global_validation_schedule = request_global_validation_schedule(parent, &mut tx_from).await?; + let local_validation_data = local_validation_data.ok_or(Error::LocalValidationDataMissing)?; let table_context = TableContext { signing_context, @@ -659,7 +670,7 @@ async fn run_job( async fn request_global_validation_schedule( parent: Hash, s: &mut mpsc::Sender, -) -> Result { +) -> Result, Error> { let (tx, rx) = oneshot::channel(); s.send(FromJob::RuntimeApiMessage(RuntimeApiMessage::Request( @@ -668,16 +679,15 @@ async fn request_global_validation_schedule( ) )).await?; - Ok(rx.await?) + Ok(rx) } - /// Request a `LocalValidationData` from `RuntimeApi`. async fn request_local_validation_data( parent: Hash, para_id: ParaId, s: &mut mpsc::Sender, -) -> Result { +) -> Result>, Error> { let (tx, rx) = oneshot::channel(); s.send(FromJob::RuntimeApiMessage(RuntimeApiMessage::Request( @@ -686,14 +696,14 @@ async fn request_local_validation_data( ) )).await?; - Ok(rx.await?.ok_or(Error::LocalValidationDataMissing)?) + Ok(rx) } /// Request a validator set from the `RuntimeApi`. async fn request_validators( parent: Hash, s: &mut mpsc::Sender, -) -> Result, Error> { +) -> Result>, Error> { let (tx, rx) = oneshot::channel(); s.send(FromJob::RuntimeApiMessage(RuntimeApiMessage::Request( @@ -702,14 +712,14 @@ async fn request_validators( ) )).await?; - Ok(rx.await?) + Ok(rx) } /// Request the scheduler roster from `RuntimeApi`. async fn request_validator_groups( parent: Hash, s: &mut mpsc::Sender, -) -> Result { +) -> Result, Error> { let (tx, rx) = oneshot::channel(); s.send(FromJob::RuntimeApiMessage(RuntimeApiMessage::Request( @@ -718,14 +728,14 @@ async fn request_validator_groups( ) )).await?; - Ok(rx.await?) + Ok(rx) } /// Request a `SigningContext` from the `RuntimeApi`. 
async fn request_signing_context( parent: Hash, s: &mut mpsc::Sender, -) -> Result { +) -> Result, Error> { let (tx, rx) = oneshot::channel(); s.send(FromJob::RuntimeApiMessage(RuntimeApiMessage::Request( @@ -734,7 +744,7 @@ async fn request_signing_context( ) )).await?; - Ok(rx.await?) + Ok(rx) } /// Request `HeadData` for some `ParaId` from `RuntimeApi`. @@ -742,7 +752,7 @@ async fn request_head_data( parent: Hash, s: &mut mpsc::Sender, id: ParaId, -) -> Result { +) -> Result, Error> { let (tx, rx) = oneshot::channel(); s.send(FromJob::RuntimeApiMessage(RuntimeApiMessage::Request( @@ -751,7 +761,7 @@ async fn request_head_data( ) )).await?; - Ok(rx.await?) + Ok(rx) } impl Jobs { From d879df6abd5b79cfcc486668392812fa85316ae4 Mon Sep 17 00:00:00 2001 From: Fedor Sakharov Date: Wed, 8 Jul 2020 13:55:35 +0300 Subject: [PATCH 16/18] Validation provides local and global validation params --- node/core/backing/src/lib.rs | 117 ++++++------------ node/subsystem/src/messages.rs | 9 +- .../src/types/overseer-protocol.md | 22 +++- 3 files changed, 58 insertions(+), 90 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 8885802553d5..f96312d08b15 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -67,7 +67,6 @@ use statement_table::{ enum Error { NotInValidatorSet, CandidateNotFound, - LocalValidationDataMissing, JobNotFound(Hash), InvalidSignature, #[from] @@ -93,10 +92,6 @@ struct CandidateBackingJob { /// `HeadData`s of the parachains that this validator is assigned to. head_data: HeadData, - /// Global validation schedule. - global_validation_schedule: GlobalValidationSchedule, - /// Local validation data of the parachain this validator is assigned to. - local_validation_data: LocalValidationData, /// The `ParaId`s assigned to this validator. assignment: ParaId, /// We issued `Valid` statements on about these candidates. 
@@ -242,12 +237,12 @@ impl CandidateBackingJob { pov: PoVBlock, ) -> Result { let valid = self.request_candidate_validation(candidate.clone(), pov.clone()).await?; - let statement = match valid { + let statement = match valid.0 { ValidationResult::Valid => { // make PoV available for later distribution. Send data to the availability // store to keep. Sign and dispatch `valid` statement to network if we // have not seconded the given candidate. - self.make_pov_available(pov).await?; + self.make_pov_available(pov, valid.1, valid.2).await?; self.issued_validity.insert(candidate.hash()); Statement::Seconded(candidate) } @@ -263,7 +258,7 @@ impl CandidateBackingJob { self.distribute_signed_statement(signed_statement).await?; } - Ok(valid) + Ok(valid.0) } fn get_backed(&self) -> Vec { @@ -403,7 +398,7 @@ impl CandidateBackingJob { let pov = self.request_pov_from_distribution(descriptor).await?; let v = self.request_candidate_validation(candidate, pov).await?; - let statement = match v { + let statement = match v.0 { ValidationResult::Valid => { self.issued_validity.insert(candidate_hash); Statement::Valid(candidate_hash) @@ -415,7 +410,7 @@ impl CandidateBackingJob { self.distribute_signed_statement(signed_statement).await?; } - Ok(v) + Ok(v.0) } /// Import the statement and kick off validation work if it is a part of our assignment. 
@@ -487,7 +482,7 @@ impl CandidateBackingJob { &mut self, candidate: AbridgedCandidateReceipt, pov: PoVBlock, - ) -> Result { + ) -> Result<(ValidationResult, GlobalValidationSchedule, LocalValidationData), Error> { let (tx, rx) = oneshot::channel(); self.tx_from.send(FromJob::CandidateValidation( @@ -520,10 +515,12 @@ impl CandidateBackingJob { async fn make_pov_available( &mut self, pov_block: PoVBlock, + global_validation: GlobalValidationSchedule, + local_validation: LocalValidationData, ) -> Result<(), Error> { let omitted_validation = OmittedValidationData { - global_validation: self.global_validation_schedule.clone(), - local_validation: self.local_validation_data.clone(), + global_validation, + local_validation, }; let available_data = AvailableData { @@ -630,17 +627,11 @@ async fn run_job( let ( head_data, signing_context, - local_validation_data, - global_validation_schedule, ) = futures::try_join!( request_head_data(parent, &mut tx_from, assignment).await?, request_signing_context(parent, &mut tx_from).await?, - request_local_validation_data(parent, assignment, &mut tx_from).await?, - request_global_validation_schedule(parent, &mut tx_from).await?, )?; - let local_validation_data = local_validation_data.ok_or(Error::LocalValidationDataMissing)?; - let table_context = TableContext { signing_context, key: Some(key), @@ -653,8 +644,6 @@ async fn run_job( rx_to, tx_from, head_data, - global_validation_schedule, - local_validation_data, assignment, issued_validity: HashSet::new(), seconded: None, @@ -666,39 +655,6 @@ async fn run_job( job.run().await } -/// Request a `GlobalValidationSchedule` from `RuntimeApi`. 
-async fn request_global_validation_schedule( - parent: Hash, - s: &mut mpsc::Sender, -) -> Result, Error> { - let (tx, rx) = oneshot::channel(); - - s.send(FromJob::RuntimeApiMessage(RuntimeApiMessage::Request( - parent, - RuntimeApiRequest::GlobalValidationSchedule(tx), - ) - )).await?; - - Ok(rx) -} - -/// Request a `LocalValidationData` from `RuntimeApi`. -async fn request_local_validation_data( - parent: Hash, - para_id: ParaId, - s: &mut mpsc::Sender, -) -> Result>, Error> { - let (tx, rx) = oneshot::channel(); - - s.send(FromJob::RuntimeApiMessage(RuntimeApiMessage::Request( - parent, - RuntimeApiRequest::LocalValidationData(para_id, tx), - ) - )).await?; - - Ok(rx) -} - /// Request a validator set from the `RuntimeApi`. async fn request_validators( parent: Hash, @@ -1131,29 +1087,6 @@ mod tests { tx.send(test_state.signing_context.clone()).unwrap(); } ); - - // Check that subsystem job requests a local validation data for assignment. - assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::LocalValidationData(_id, tx)) - ) if parent == test_state.relay_parent => { - tx.send(Some(test_state.local_validation_data.clone())).unwrap(); - } - ); - - // Check that subsystem job requests a global validation schedule for assignment. 
- assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request( - parent, - RuntimeApiRequest::GlobalValidationSchedule(tx), - ) - ) if parent == test_state.relay_parent => { - tx.send(test_state.global_validation_schedule.clone()).unwrap(); - } - ); } // Test that a `CandidateBackingMessage::Second` issues validation work @@ -1201,7 +1134,11 @@ mod tests { ) if parent_hash == test_state.relay_parent && pov == pov_block && c == candidate => { assert_eq!(head_data, *expected_head_data); - tx.send(Ok(ValidationResult::Valid)).unwrap(); + tx.send(Ok(( + ValidationResult::Valid, + test_state.global_validation_schedule, + test_state.local_validation_data, + ))).unwrap(); } ); @@ -1306,7 +1243,11 @@ mod tests { ) if relay_parent == test_state.relay_parent && candidate == candidate_a => { assert_eq!(head_data, *expected_head_data); assert_eq!(pov, pov_block); - tx.send(Ok(ValidationResult::Valid)).unwrap(); + tx.send(Ok(( + ValidationResult::Valid, + test_state.global_validation_schedule, + test_state.local_validation_data, + ))).unwrap(); } ); @@ -1419,7 +1360,11 @@ mod tests { ) if relay_parent == test_state.relay_parent && candidate == candidate_a => { assert_eq!(pov, pov_block); assert_eq!(head_data, *expected_head_data); - tx.send(Ok(ValidationResult::Valid)).unwrap(); + tx.send(Ok(( + ValidationResult::Valid, + test_state.global_validation_schedule, + test_state.local_validation_data, + ))).unwrap(); } ); @@ -1533,7 +1478,11 @@ mod tests { ) if parent_hash == test_state.relay_parent && pov == pov_block_a && c == candidate_a => { assert_eq!(head_data, *expected_head_data); - tx.send(Ok(ValidationResult::Invalid)).unwrap(); + tx.send(Ok(( + ValidationResult::Invalid, + test_state.global_validation_schedule.clone(), + test_state.local_validation_data.clone(), + ))).unwrap(); } ); @@ -1579,7 +1528,11 @@ mod tests { ) if parent_hash == test_state.relay_parent && pov == pov_block_b && c == candidate_b => { assert_eq!(head_data, 
*expected_head_data); - tx.send(Ok(ValidationResult::Valid)).unwrap(); + tx.send(Ok(( + ValidationResult::Valid, + test_state.global_validation_schedule, + test_state.local_validation_data, + ))).unwrap(); } ); diff --git a/node/subsystem/src/messages.rs b/node/subsystem/src/messages.rs index 50c0b60752e3..cbb5fd797eef 100644 --- a/node/subsystem/src/messages.rs +++ b/node/subsystem/src/messages.rs @@ -79,7 +79,10 @@ pub enum CandidateValidationMessage { AbridgedCandidateReceipt, HeadData, PoVBlock, - oneshot::Sender>, + oneshot::Sender>, ), } @@ -179,10 +182,6 @@ pub enum RuntimeApiRequest { ValidationCode(ParaId, BlockNumber, Option, oneshot::Sender), /// Get head data for a specific para. HeadData(ParaId, oneshot::Sender), - /// Get the local validation data. - LocalValidationData(ParaId, oneshot::Sender>), - /// Get the global validation schedule. - GlobalValidationSchedule(oneshot::Sender), } /// A message to the Runtime API subsystem. diff --git a/roadmap/implementers-guide/src/types/overseer-protocol.md b/roadmap/implementers-guide/src/types/overseer-protocol.md index 8fec32c1faef..7d1a215ea4b3 100644 --- a/roadmap/implementers-guide/src/types/overseer-protocol.md +++ b/roadmap/implementers-guide/src/types/overseer-protocol.md @@ -286,10 +286,26 @@ enum StatementDistributionMessage { Various modules request that the [Candidate Validation subsystem](../node/utility/candidate-validation.md) validate a block with this message ```rust + +/// Result of the validation of the candidate. +enum ValidationResult { + /// Candidate is valid. + Valid, + /// Candidate is invalid. + Invalid, +} + enum CandidateValidationMessage { /// Validate a candidate with provided parameters. Returns `Err` if an only if an internal - /// error is encountered. A bad candidate will return `Ok(false)`, while a good one will - /// return `Ok(true)`. - Validate(ValidationCode, CandidateReceipt, PoV, ResponseChannel>), + /// error is encountered. 
+ /// In case no internal error was encountered it returns a tuple containing the result of
+ /// validation and `GlobalValidationSchedule` and `LocalValidationData` structures that
+ /// may be used by the caller to make the candidate available.
+ /// A bad candidate will return `Ok((ValidationResult::Invalid, _, _))`, while a good one will
+ /// return `Ok((ValidationResult::Valid, _, _))`.
+ Validate(
+ Hash, CandidateReceipt, HeadData, PoV, ResponseChannel<
+ Result<(ValidationResult, GlobalValidationSchedule, LocalValidationData)>
+ >),
}
```

From fef54e1cdad51251f30bb6d6266f994a814a4a0e Mon Sep 17 00:00:00 2001
From: Fedor Sakharov
Date: Wed, 8 Jul 2020 18:21:30 +0300
Subject: [PATCH 17/18] Test issued validity tracking

---
 node/core/backing/src/lib.rs | 156 ++++++++++++++++++++++++++++++++++-
 1 file changed, 153 insertions(+), 3 deletions(-)

diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs
index f96312d08b15..f0248002a466 100644
--- a/node/core/backing/src/lib.rs
+++ b/node/core/backing/src/lib.rs
@@ -94,7 +94,7 @@ struct CandidateBackingJob {
 	head_data: HeadData,
 	/// The `ParaId`s assigned to this validator.
 	assignment: ParaId,
-	/// We issued `Valid` statements on about these candidates.
+	/// We issued `Valid` or `Invalid` statements on about these candidates.
 	issued_validity: HashSet,
 	/// `Some(h)` if this job has already issues `Seconded` statemt for some candidate with `h` hash.
seconded: Option, @@ -400,12 +400,15 @@ impl CandidateBackingJob { let statement = match v.0 { ValidationResult::Valid => { - self.issued_validity.insert(candidate_hash); Statement::Valid(candidate_hash) } - ValidationResult::Invalid => Statement::Invalid(candidate_hash), + ValidationResult::Invalid => { + Statement::Invalid(candidate_hash) + } }; + self.issued_validity.insert(candidate_hash); + if let Some(signed_statement) = self.sign_statement(statement) { self.distribute_signed_statement(signed_statement).await?; } @@ -1567,4 +1570,151 @@ mod tests { ).await; }); } + + // Test that if we have already issued a statement (in this case `Invalid`) about a + // candidate we will not be issuing a `Seconded` statement on it. + #[test] + fn backing_multiple_statements_work() { + let test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |test_harness| async move { + let TestHarness { mut virtual_overseer } = test_harness; + + test_startup(&mut virtual_overseer, &test_state).await; + + let pov_block = PoVBlock { + block_data: BlockData(vec![42, 43, 44]), + }; + + let pov_block_hash = pov_block.hash(); + + let candidate = AbridgedCandidateReceipt { + parachain_index: test_state.chain_ids[0], + relay_parent: test_state.relay_parent, + pov_block_hash, + ..Default::default() + }; + + let candidate_hash = candidate.hash(); + + let signed_a = SignedFullStatement::sign( + Statement::Seconded(candidate.clone()), + &test_state.signing_context, + 2, + &test_state.validators[2].pair().into(), + ); + + // Send in a `Statement` with a candidate. + let statement = CandidateBackingMessage::Statement( + test_state.relay_parent, + signed_a.clone(), + ); + + virtual_overseer.send(FromOverseer::Communication{ msg: statement }).await; + + // Subsystem requests PoV and requests validation. 
+ assert_matches!( + virtual_overseer.recv().await, + AllMessages::PoVDistribution( + PoVDistributionMessage::FetchPoV(relay_parent, _, tx) + ) => { + assert_eq!(relay_parent, test_state.relay_parent); + tx.send(pov_block.clone()).unwrap(); + } + ); + + let expected_head_data = test_state.head_data.get(&test_state.chain_ids[0]).unwrap(); + + // Tell subsystem that this candidate is invalid. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CandidateValidation( + CandidateValidationMessage::Validate( + relay_parent, + candidate_recvd, + head_data, + pov, + tx, + ) + ) => { + assert_eq!(relay_parent, test_state.relay_parent); + assert_eq!(candidate_recvd, candidate); + assert_eq!(head_data, *expected_head_data); + assert_eq!(pov, pov_block); + tx.send(Ok(( + ValidationResult::Invalid, + test_state.global_validation_schedule, + test_state.local_validation_data, + ))).unwrap(); + } + ); + + // The invalid message is shared. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::StatementDistribution( + StatementDistributionMessage::Share( + relay_parent, + signed_statement, + ) + ) => { + assert_eq!(relay_parent, test_state.relay_parent); + signed_statement.check_signature( + &test_state.signing_context, + &test_state.validator_public[0], + ).unwrap(); + assert_eq!(*signed_statement.payload(), Statement::Invalid(candidate_hash)); + } + ); + + // Ask subsystem to `Second` a candidate that already has a statement issued about. + // This should emit no actions from subsystem. 
+ let second = CandidateBackingMessage::Second( + test_state.relay_parent, + candidate.clone(), + pov_block.clone(), + ); + + virtual_overseer.send(FromOverseer::Communication{ msg: second }).await; + + let pov_to_second = PoVBlock { + block_data: BlockData(vec![3, 2, 1]), + }; + + let pov_block_hash = pov_to_second.hash(); + + let candidate_to_second = AbridgedCandidateReceipt { + parachain_index: test_state.chain_ids[0], + relay_parent: test_state.relay_parent, + pov_block_hash, + ..Default::default() + }; + + let second = CandidateBackingMessage::Second( + test_state.relay_parent, + candidate_to_second.clone(), + pov_to_second.clone(), + ); + + // In order to trigger _some_ actions from subsystem ask it to second another + // candidate. The only reason to do so is to make sure that no actions were + // triggered on the prev step. + virtual_overseer.send(FromOverseer::Communication{ msg: second }).await; + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CandidateValidation( + CandidateValidationMessage::Validate( + relay_parent, + _, + _, + pov, + _, + ) + ) => { + assert_eq!(relay_parent, test_state.relay_parent); + assert_eq!(pov, pov_to_second); + } + ); + }); + } } From e8fd8f336cbec9617a74e058681f8d2b04ccaa5c Mon Sep 17 00:00:00 2001 From: Fedor Sakharov Date: Thu, 9 Jul 2020 19:04:43 +0300 Subject: [PATCH 18/18] Nits from review --- node/core/backing/src/lib.rs | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index f0248002a466..15b7ec8673b4 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -95,7 +95,7 @@ struct CandidateBackingJob { /// The `ParaId`s assigned to this validator. assignment: ParaId, /// We issued `Valid` or `Invalid` statements on about these candidates. 
- issued_validity: HashSet, + issued_statements: HashSet, /// `Some(h)` if this job has already issues `Seconded` statemt for some candidate with `h` hash. seconded: Option, /// We have already reported misbehaviors for these validators. @@ -243,7 +243,7 @@ impl CandidateBackingJob { // store to keep. Sign and dispatch `valid` statement to network if we // have not seconded the given candidate. self.make_pov_available(pov, valid.1, valid.2).await?; - self.issued_validity.insert(candidate.hash()); + self.issued_statements.insert(candidate.hash()); Statement::Seconded(candidate) } ValidationResult::Invalid => { @@ -303,6 +303,8 @@ impl CandidateBackingJob { } /// Check if there have happened any new misbehaviors and issue necessary messages. + /// + /// TODO: Report multiple misbehaviors (https://github.com/paritytech/polkadot/issues/1387) async fn issue_new_misbehaviors(&mut self) -> Result<(), Error> { let mut reports = Vec::new(); @@ -351,6 +353,11 @@ impl CandidateBackingJob { async fn process_msg(&mut self, msg: CandidateBackingMessage) -> Result<(), Error> { match msg { CandidateBackingMessage::Second(_, candidate, pov) => { + // Sanity check that candidate is from our assignment. + if candidate.parachain_index != self.assignment { + return Ok(()); + } + // If the message is a `CandidateBackingMessage::Second`, sign and dispatch a // Seconded statement only if we have not seconded any other candidate and // have not signed a Valid statement for the requested candidate. 
@@ -359,7 +366,7 @@ impl CandidateBackingJob { None => { let candidate_hash = candidate.hash(); - if !self.issued_validity.contains(&candidate_hash) { + if !self.issued_statements.contains(&candidate_hash) { if let Ok(ValidationResult::Valid) = self.validate_and_second( candidate, pov, @@ -407,7 +414,7 @@ impl CandidateBackingJob { } }; - self.issued_validity.insert(candidate_hash); + self.issued_statements.insert(candidate_hash); if let Some(signed_statement) = self.sign_statement(statement) { self.distribute_signed_statement(signed_statement).await?; @@ -648,7 +655,7 @@ async fn run_job( tx_from, head_data, assignment, - issued_validity: HashSet::new(), + issued_statements: HashSet::new(), seconded: None, reported_misbehavior_for: HashSet::new(), table: Table::default(),