diff --git a/Cargo.lock b/Cargo.lock
index 4628968d309f..0a037ae7676a 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -5235,16 +5235,33 @@ dependencies = [
name = "polkadot-node-core-approval-voting"
version = "0.1.0"
dependencies = [
+ "assert_matches",
"bitvec",
"futures 0.3.12",
+ "futures-timer 3.0.2",
+ "maplit",
+ "merlin",
"parity-scale-codec",
+ "parking_lot 0.11.1",
"polkadot-node-primitives",
"polkadot-node-subsystem",
+ "polkadot-node-subsystem-test-helpers",
"polkadot-overseer",
"polkadot-primitives",
+ "rand_core 0.5.1",
"sc-client-api",
+ "sc-keystore",
+ "schnorrkel",
+ "sp-application-crypto",
"sp-blockchain",
+ "sp-consensus-babe",
"sp-consensus-slots",
+ "sp-core",
+ "sp-keyring",
+ "sp-keystore",
+ "sp-runtime",
+ "tracing",
+ "tracing-futures",
]
[[package]]
@@ -5412,11 +5429,13 @@ dependencies = [
"futures 0.3.12",
"memory-lru",
"parity-util-mem",
+ "polkadot-node-primitives",
"polkadot-node-subsystem",
"polkadot-node-subsystem-test-helpers",
"polkadot-node-subsystem-util",
"polkadot-primitives",
"sp-api",
+ "sp-consensus-babe",
"sp-core",
"tracing",
"tracing-futures",
@@ -5460,10 +5479,13 @@ dependencies = [
"parity-scale-codec",
"polkadot-primitives",
"polkadot-statement-table",
- "sp-consensus-slots",
+ "schnorrkel",
+ "sp-application-crypto",
+ "sp-consensus-babe",
"sp-consensus-vrf",
"sp-core",
"sp-runtime",
+ "thiserror",
]
[[package]]
diff --git a/node/core/approval-voting/Cargo.toml b/node/core/approval-voting/Cargo.toml
index 4054eea4ce09..35117355cbce 100644
--- a/node/core/approval-voting/Cargo.toml
+++ b/node/core/approval-voting/Cargo.toml
@@ -6,16 +6,33 @@ edition = "2018"
[dependencies]
futures = "0.3.8"
+futures-timer = "3.0.2"
parity-scale-codec = { version = "2.0.0", default-features = false, features = ["bit-vec", "derive"] }
+tracing = "0.1.22"
+tracing-futures = "0.2.4"
+bitvec = { version = "0.20.1", default-features = false, features = ["alloc"] }
+merlin = "2.0"
+schnorrkel = "0.9.1"
polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" }
polkadot-overseer = { path = "../../overseer" }
polkadot-primitives = { path = "../../../primitives" }
polkadot-node-primitives = { path = "../../primitives" }
-bitvec = "0.20.1"
sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
sp-consensus-slots = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, features = ["full_crypto"] }
+sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
-[dev-dependencies]
\ No newline at end of file
+[dev-dependencies]
+parking_lot = "0.11.1"
+rand_core = "0.5.1" # should match schnorrkel
+sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-consensus-babe = { git = "https://github.com/paritytech/substrate", branch = "master" }
+maplit = "1.0.2"
+polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" }
+assert_matches = "1.4.0"
diff --git a/node/core/approval-voting/src/approval_checking.rs b/node/core/approval-voting/src/approval_checking.rs
new file mode 100644
index 000000000000..3b6d8f3540b8
--- /dev/null
+++ b/node/core/approval-voting/src/approval_checking.rs
@@ -0,0 +1,879 @@
+// Copyright 2020 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! Utilities for checking whether a candidate has been approved under a given block.
+
+use polkadot_node_primitives::approval::DelayTranche;
+use bitvec::slice::BitSlice;
+use bitvec::order::Lsb0 as BitOrderLsb0;
+
+use crate::persisted_entries::{ApprovalEntry, CandidateEntry};
+use crate::time::Tick;
+
+/// The required tranches of assignments needed to determine whether a candidate is approved.
+#[derive(Debug, PartialEq, Clone)]
+pub enum RequiredTranches {
+ /// All validators appear to be required, based on tranches already taken and remaining
+ /// no-shows.
+ All,
+ /// More tranches required - We're awaiting more assignments.
+ Pending {
+ /// The highest considered delay tranche when counting assignments.
+ considered: DelayTranche,
+ /// The tick at which the next no-show, of the assignments counted, would occur.
+		next_no_show: Option<Tick>,
+ /// The highest tranche to consider when looking to broadcast own assignment.
+ /// This should be considered along with the clock drift to avoid broadcasting
+ /// assignments that are before the local time.
+ maximum_broadcast: DelayTranche,
+ /// The clock drift, in ticks, to apply to the local clock when determining whether
+ /// to broadcast an assignment or when to schedule a wakeup. The local clock should be treated
+ /// as though it is `clock_drift` ticks earlier.
+ clock_drift: Tick,
+ },
+ /// An exact number of required tranches and a number of no-shows. This indicates that
+ /// at least the amount of `needed_approvals` are assigned and additionally all no-shows
+ /// are covered.
+ Exact {
+ /// The tranche to inspect up to.
+ needed: DelayTranche,
+ /// The amount of missing votes that should be tolerated.
+ tolerated_missing: usize,
+ /// When the next no-show would be, if any. This is used to schedule the next wakeup in the
+ /// event that there are some assignments that don't have corresponding approval votes. If this
+ /// is `None`, all assignments have approvals.
+		next_no_show: Option<Tick>,
+ }
+}
+
+/// Check the approval of a candidate.
+pub fn check_approval(
+ candidate: &CandidateEntry,
+ approval: &ApprovalEntry,
+ required: RequiredTranches,
+) -> bool {
+ match required {
+ RequiredTranches::Pending { .. } => false,
+ RequiredTranches::All => {
+ let approvals = candidate.approvals();
+ 3 * approvals.count_ones() > 2 * approvals.len()
+ }
+ RequiredTranches::Exact { needed, tolerated_missing, .. } => {
+ // whether all assigned validators up to `needed` less no_shows have approved.
+ // e.g. if we had 5 tranches and 1 no-show, we would accept all validators in
+ // tranches 0..=5 except for 1 approving. In that example, we also accept all
+ // validators in tranches 0..=5 approving, but that would indicate that the
+ // RequiredTranches value was incorrectly constructed, so it is not realistic.
+ // If there are more missing approvals than there are no-shows, that indicates
+ // that there are some assignments which are not yet no-shows, but may become
+ // no-shows.
+
+ let mut assigned_mask = approval.assignments_up_to(needed);
+ let approvals = candidate.approvals();
+
+ let n_assigned = assigned_mask.count_ones();
+
+ // Filter the amount of assigned validators by those which have approved.
+ assigned_mask &= approvals.iter().by_val();
+ let n_approved = assigned_mask.count_ones();
+
+ // note: the process of computing `required` only chooses `exact` if
+ // that will surpass a minimum amount of checks.
+ // shouldn't typically go above, since all no-shows are supposed to be covered.
+ n_approved + tolerated_missing >= n_assigned
+ }
+ }
+}
+
+// Determining the amount of tranches required for approval or which assignments are pending
+// involves moving through a series of states while looping over the tranches
+//
+// that we are aware of. First, we perform an initial count of the number of assignments
+// until we reach the number of needed assignments for approval. As we progress, we count the
+// number of no-shows in each tranche.
+//
+// Then, if there are any no-shows, we proceed into a series of subsequent states for covering
+// no-shows.
+//
+// We cover each no-show by a non-empty tranche, keeping track of the amount of further
+// no-shows encountered along the way. Once all of the no-shows we were previously aware
+// of are covered, we then progress to cover the no-shows we encountered while covering those,
+// and so on.
+#[derive(Debug)]
+struct State {
+ /// The total number of assignments obtained.
+ assignments: usize,
+ /// The depth of no-shows we are currently covering.
+ depth: usize,
+ /// The amount of no-shows that have been covered at the previous or current depths.
+ covered: usize,
+ /// The amount of assignments that we are attempting to cover at this depth.
+ ///
+ /// At depth 0, these are the initial needed approvals, and at other depths these
+ /// are no-shows.
+ covering: usize,
+ /// The number of uncovered no-shows encountered at this depth. These will be the
+ /// `covering` of the next depth.
+ uncovered: usize,
+ /// The next tick at which a no-show would occur, if any.
+	next_no_show: Option<Tick>,
+}
+
+impl State {
+ fn output(
+ &self,
+ tranche: DelayTranche,
+ needed_approvals: usize,
+ n_validators: usize,
+ no_show_duration: Tick,
+ ) -> RequiredTranches {
+ let covering = if self.depth == 0 { 0 } else { self.covering };
+ if self.depth != 0 && self.assignments + covering + self.uncovered >= n_validators {
+ return RequiredTranches::All;
+ }
+
+ // If we have enough assignments and all no-shows are covered, we have reached the number
+ // of tranches that we need to have.
+ if self.assignments >= needed_approvals && (covering + self.uncovered) == 0 {
+ return RequiredTranches::Exact {
+ needed: tranche,
+ tolerated_missing: self.covered,
+ next_no_show: self.next_no_show,
+ };
+ }
+
+ // We're pending more assignments and should look at more tranches.
+ let clock_drift = self.clock_drift(no_show_duration);
+ if self.depth == 0 {
+ RequiredTranches::Pending {
+ considered: tranche,
+ next_no_show: self.next_no_show,
+ // during the initial assignment-gathering phase, we want to accept assignments
+ // from any tranche. Note that honest validators will still not broadcast their
+ // assignment until it is time to do so, regardless of this value.
+ maximum_broadcast: DelayTranche::max_value(),
+ clock_drift,
+ }
+ } else {
+ RequiredTranches::Pending {
+ considered: tranche,
+ next_no_show: self.next_no_show,
+ maximum_broadcast: tranche + (covering + self.uncovered) as DelayTranche,
+ clock_drift,
+ }
+ }
+ }
+
+ fn clock_drift(&self, no_show_duration: Tick) -> Tick {
+ self.depth as Tick * no_show_duration
+ }
+
+ fn advance(
+ &self,
+ new_assignments: usize,
+ new_no_shows: usize,
+		next_no_show: Option<Tick>,
+ ) -> State {
+ let new_covered = if self.depth == 0 {
+ new_assignments
+ } else {
+ // When covering no-shows, we treat each non-empty tranche as covering 1 assignment,
+ // regardless of how many assignments are within the tranche.
+ new_assignments.min(1)
+ };
+
+ let assignments = self.assignments + new_assignments;
+ let covering = self.covering.saturating_sub(new_covered);
+ let covered = if self.depth == 0 {
+ // If we're at depth 0, we're not actually covering no-shows,
+ // so we don't need to count them as such.
+ 0
+ } else {
+ self.covered + new_covered
+ };
+ let uncovered = self.uncovered + new_no_shows;
+ let next_no_show = super::min_prefer_some(
+ self.next_no_show,
+ next_no_show,
+ );
+
+ let (depth, covering, uncovered) = if covering == 0 {
+ if uncovered == 0 {
+ (self.depth, 0, uncovered)
+ } else {
+ (self.depth + 1, uncovered, 0)
+ }
+ } else {
+ (self.depth, covering, uncovered)
+ };
+
+ State { assignments, depth, covered, covering, uncovered, next_no_show }
+ }
+}
+
+/// Determine the amount of tranches of assignments needed to determine approval of a candidate.
+pub fn tranches_to_approve(
+ approval_entry: &ApprovalEntry,
+	approvals: &BitSlice<BitOrderLsb0, u8>,
+ tranche_now: DelayTranche,
+ block_tick: Tick,
+ no_show_duration: Tick,
+ needed_approvals: usize,
+) -> RequiredTranches {
+ let tick_now = tranche_now as Tick + block_tick;
+ let n_validators = approval_entry.n_validators();
+
+ let initial_state = State {
+ assignments: 0,
+ depth: 0,
+ covered: 0,
+ covering: needed_approvals,
+ uncovered: 0,
+ next_no_show: None,
+ };
+
+ // The `ApprovalEntry` doesn't have any data for empty tranches. We still want to iterate over
+ // these empty tranches, so we create an iterator to fill the gaps.
+ //
+ // This iterator has an infinitely long amount of non-empty tranches appended to the end.
+ let tranches_with_gaps_filled = {
+ let mut gap_end = 0;
+
+ let approval_entries_filled = approval_entry.tranches()
+ .iter()
+ .flat_map(move |tranche_entry| {
+ let tranche = tranche_entry.tranche();
+ let assignments = tranche_entry.assignments();
+
+ let gap_start = gap_end + 1;
+ gap_end = tranche;
+
+ (gap_start..tranche).map(|i| (i, &[] as &[_]))
+ .chain(std::iter::once((tranche, assignments)))
+ });
+
+ let pre_end = approval_entry.tranches().first().map(|t| t.tranche());
+ let post_start = approval_entry.tranches().last().map_or(0, |t| t.tranche() + 1);
+
+ let pre = pre_end.into_iter()
+ .flat_map(|pre_end| (0..pre_end).map(|i| (i, &[] as &[_])));
+ let post = (post_start..).map(|i| (i, &[] as &[_]));
+
+ pre.chain(approval_entries_filled).chain(post)
+ };
+
+ tranches_with_gaps_filled
+ .scan(Some(initial_state), |state, (tranche, assignments)| {
+ // The `Option` here is used for early exit.
+ let s = match state.take() {
+ None => return None,
+ Some(s) => s,
+ };
+
+ let clock_drift = s.clock_drift(no_show_duration);
+ let drifted_tick_now = tick_now.saturating_sub(clock_drift);
+ let drifted_tranche_now = drifted_tick_now.saturating_sub(block_tick) as DelayTranche;
+
+ // Break the loop once we've taken enough tranches.
+ // Note that we always take tranche 0 as `drifted_tranche_now` cannot be less than 0.
+ if tranche > drifted_tranche_now {
+ return None;
+ }
+
+ let n_assignments = assignments.len();
+
+ // count no-shows. An assignment is a no-show if there is no corresponding approval vote
+ // after a fixed duration.
+ //
+ // While we count the no-shows, we also determine the next possible no-show we might
+ // see within this tranche.
+ let mut next_no_show = None;
+ let no_shows = {
+ let next_no_show = &mut next_no_show;
+ assignments.iter()
+ .map(|(v_index, tick)| (v_index, tick.saturating_sub(clock_drift) + no_show_duration))
+ .filter(|&(v_index, no_show_at)| {
+ let has_approved = approvals.get(*v_index as usize).map(|b| *b).unwrap_or(false);
+
+ let is_no_show = !has_approved && no_show_at <= drifted_tick_now;
+
+ if !is_no_show && !has_approved {
+ *next_no_show = super::min_prefer_some(
+ *next_no_show,
+ Some(no_show_at + clock_drift),
+ );
+ }
+
+ is_no_show
+ }).count()
+ };
+
+ let s = s.advance(n_assignments, no_shows, next_no_show);
+ let output = s.output(tranche, needed_approvals, n_validators, no_show_duration);
+
+ *state = match output {
+ RequiredTranches::Exact { .. } | RequiredTranches::All => {
+ // Wipe the state clean so the next iteration of this closure will terminate
+ // the iterator. This guarantees that we can call `last` further down to see
+ // either a `Finished` or `Pending` result
+ None
+ }
+ RequiredTranches::Pending { .. } => {
+ // Pending results are only interesting when they are the last result of the iterator
+ // i.e. we never achieve a satisfactory level of assignment.
+ Some(s)
+ }
+ };
+
+ Some(output)
+ })
+ .last()
+ .expect("the underlying iterator is infinite, starts at 0, and never exits early before tranche 1; qed")
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ use polkadot_primitives::v1::GroupIndex;
+ use bitvec::bitvec;
+ use bitvec::order::Lsb0 as BitOrderLsb0;
+
+ use crate::approval_db;
+
+ #[test]
+ fn pending_is_not_approved() {
+ let candidate = approval_db::v1::CandidateEntry {
+ candidate: Default::default(),
+ session: 0,
+ block_assignments: Default::default(),
+ approvals: Default::default(),
+ }.into();
+
+ let approval_entry = approval_db::v1::ApprovalEntry {
+ tranches: Vec::new(),
+ assignments: Default::default(),
+ our_assignment: None,
+ backing_group: GroupIndex(0),
+ approved: false,
+ }.into();
+
+ assert!(!check_approval(
+ &candidate,
+ &approval_entry,
+ RequiredTranches::Pending {
+ considered: 0,
+ next_no_show: None,
+ maximum_broadcast: 0,
+ clock_drift: 0,
+ },
+ ));
+ }
+
+ #[test]
+ fn all_requires_supermajority() {
+ let mut candidate: CandidateEntry = approval_db::v1::CandidateEntry {
+ candidate: Default::default(),
+ session: 0,
+ block_assignments: Default::default(),
+ approvals: bitvec![BitOrderLsb0, u8; 0; 10],
+ }.into();
+
+ for i in 0..6 {
+ candidate.mark_approval(i);
+ }
+
+ let approval_entry = approval_db::v1::ApprovalEntry {
+ tranches: Vec::new(),
+ assignments: bitvec![BitOrderLsb0, u8; 1; 10],
+ our_assignment: None,
+ backing_group: GroupIndex(0),
+ approved: false,
+ }.into();
+
+ assert!(!check_approval(&candidate, &approval_entry, RequiredTranches::All));
+
+ candidate.mark_approval(6);
+ assert!(check_approval(&candidate, &approval_entry, RequiredTranches::All));
+ }
+
+ #[test]
+ fn exact_takes_only_assignments_up_to() {
+ let mut candidate: CandidateEntry = approval_db::v1::CandidateEntry {
+ candidate: Default::default(),
+ session: 0,
+ block_assignments: Default::default(),
+ approvals: bitvec![BitOrderLsb0, u8; 0; 10],
+ }.into();
+
+ for i in 0..6 {
+ candidate.mark_approval(i);
+ }
+
+ let approval_entry = approval_db::v1::ApprovalEntry {
+ tranches: vec![
+ approval_db::v1::TrancheEntry {
+ tranche: 0,
+ assignments: (0..4).map(|i| (i, 0.into())).collect(),
+ },
+ approval_db::v1::TrancheEntry {
+ tranche: 1,
+ assignments: (4..6).map(|i| (i, 1.into())).collect(),
+ },
+ approval_db::v1::TrancheEntry {
+ tranche: 2,
+ assignments: (6..10).map(|i| (i, 0.into())).collect(),
+ },
+ ],
+ assignments: bitvec![BitOrderLsb0, u8; 1; 10],
+ our_assignment: None,
+ backing_group: GroupIndex(0),
+ approved: false,
+ }.into();
+
+ assert!(check_approval(
+ &candidate,
+ &approval_entry,
+ RequiredTranches::Exact {
+ needed: 1,
+ tolerated_missing: 0,
+ next_no_show: None,
+ },
+ ));
+ assert!(!check_approval(
+ &candidate,
+ &approval_entry,
+ RequiredTranches::Exact {
+ needed: 2,
+ tolerated_missing: 0,
+ next_no_show: None,
+ },
+ ));
+ assert!(check_approval(
+ &candidate,
+ &approval_entry,
+ RequiredTranches::Exact {
+ needed: 2,
+ tolerated_missing: 4,
+ next_no_show: None,
+ },
+ ));
+ }
+
+ #[test]
+ fn tranches_to_approve_everyone_present() {
+ let block_tick = 0;
+ let no_show_duration = 10;
+ let needed_approvals = 4;
+
+ let mut approval_entry: ApprovalEntry = approval_db::v1::ApprovalEntry {
+ tranches: Vec::new(),
+ assignments: bitvec![BitOrderLsb0, u8; 0; 5],
+ our_assignment: None,
+ backing_group: GroupIndex(0),
+ approved: false,
+ }.into();
+
+ approval_entry.import_assignment(0, 0, block_tick);
+ approval_entry.import_assignment(0, 1, block_tick);
+
+ approval_entry.import_assignment(1, 2, block_tick + 1);
+ approval_entry.import_assignment(1, 3, block_tick + 1);
+
+ approval_entry.import_assignment(2, 4, block_tick + 2);
+
+ let approvals = bitvec![BitOrderLsb0, u8; 1; 5];
+
+ assert_eq!(
+ tranches_to_approve(
+ &approval_entry,
+ &approvals,
+ 2,
+ block_tick,
+ no_show_duration,
+ needed_approvals,
+ ),
+ RequiredTranches::Exact { needed: 1, tolerated_missing: 0, next_no_show: None },
+ );
+ }
+
+ #[test]
+ fn tranches_to_approve_not_enough_initial_count() {
+ let block_tick = 20;
+ let no_show_duration = 10;
+ let needed_approvals = 4;
+
+ let mut approval_entry: ApprovalEntry = approval_db::v1::ApprovalEntry {
+ tranches: Vec::new(),
+ assignments: bitvec![BitOrderLsb0, u8; 0; 10],
+ our_assignment: None,
+ backing_group: GroupIndex(0),
+ approved: false,
+ }.into();
+
+ approval_entry.import_assignment(0, 0, block_tick);
+ approval_entry.import_assignment(1, 2, block_tick);
+
+ let approvals = bitvec![BitOrderLsb0, u8; 0; 10];
+
+ let tranche_now = 2;
+ assert_eq!(
+ tranches_to_approve(
+ &approval_entry,
+ &approvals,
+ tranche_now,
+ block_tick,
+ no_show_duration,
+ needed_approvals,
+ ),
+ RequiredTranches::Pending {
+ considered: 2,
+ next_no_show: Some(block_tick + no_show_duration),
+ maximum_broadcast: DelayTranche::max_value(),
+ clock_drift: 0,
+ },
+ );
+ }
+
+ #[test]
+ fn tranches_to_approve_no_shows_before_initial_count_treated_same_as_not_initial() {
+ let block_tick = 20;
+ let no_show_duration = 10;
+ let needed_approvals = 4;
+
+ let mut approval_entry: ApprovalEntry = approval_db::v1::ApprovalEntry {
+ tranches: Vec::new(),
+ assignments: bitvec![BitOrderLsb0, u8; 0; 10],
+ our_assignment: None,
+ backing_group: GroupIndex(0),
+ approved: false,
+ }.into();
+
+ approval_entry.import_assignment(0, 0, block_tick);
+ approval_entry.import_assignment(0, 1, block_tick);
+
+ approval_entry.import_assignment(1, 2, block_tick);
+
+ let mut approvals = bitvec![BitOrderLsb0, u8; 0; 10];
+ approvals.set(0, true);
+ approvals.set(1, true);
+
+ let tranche_now = no_show_duration as DelayTranche + 1;
+ assert_eq!(
+ tranches_to_approve(
+ &approval_entry,
+ &approvals,
+ tranche_now,
+ block_tick,
+ no_show_duration,
+ needed_approvals,
+ ),
+ RequiredTranches::Pending {
+ considered: 11,
+ next_no_show: None,
+ maximum_broadcast: DelayTranche::max_value(),
+ clock_drift: 0,
+ },
+ );
+ }
+
+ #[test]
+ fn tranches_to_approve_cover_no_show_not_enough() {
+ let block_tick = 20;
+ let no_show_duration = 10;
+ let needed_approvals = 4;
+ let n_validators = 8;
+
+ let mut approval_entry: ApprovalEntry = approval_db::v1::ApprovalEntry {
+ tranches: Vec::new(),
+ assignments: bitvec![BitOrderLsb0, u8; 0; n_validators],
+ our_assignment: None,
+ backing_group: GroupIndex(0),
+ approved: false,
+ }.into();
+
+ approval_entry.import_assignment(0, 0, block_tick);
+ approval_entry.import_assignment(0, 1, block_tick);
+
+ approval_entry.import_assignment(1, 2, block_tick);
+ approval_entry.import_assignment(1, 3, block_tick);
+
+ let mut approvals = bitvec![BitOrderLsb0, u8; 0; n_validators];
+ approvals.set(0, true);
+ approvals.set(1, true);
+ // skip 2
+ approvals.set(3, true);
+
+ let tranche_now = no_show_duration as DelayTranche + 1;
+ assert_eq!(
+ tranches_to_approve(
+ &approval_entry,
+ &approvals,
+ tranche_now,
+ block_tick,
+ no_show_duration,
+ needed_approvals,
+ ),
+ RequiredTranches::Pending {
+ considered: 1,
+ next_no_show: None,
+ maximum_broadcast: 2, // tranche 1 + 1 no-show
+ clock_drift: 1 * no_show_duration,
+ }
+ );
+
+ approvals.set(0, false);
+
+ assert_eq!(
+ tranches_to_approve(
+ &approval_entry,
+ &approvals,
+ tranche_now,
+ block_tick,
+ no_show_duration,
+ needed_approvals,
+ ),
+ RequiredTranches::Pending {
+ considered: 1,
+ next_no_show: None,
+ maximum_broadcast: 3, // tranche 1 + 2 no-shows
+ clock_drift: 1 * no_show_duration,
+ }
+ );
+ }
+
+ #[test]
+ fn tranches_to_approve_multi_cover_not_enough() {
+ let block_tick = 20;
+ let no_show_duration = 10;
+ let needed_approvals = 4;
+ let n_validators = 8;
+
+ let mut approval_entry: ApprovalEntry = approval_db::v1::ApprovalEntry {
+ tranches: Vec::new(),
+ assignments: bitvec![BitOrderLsb0, u8; 0; n_validators],
+ our_assignment: None,
+ backing_group: GroupIndex(0),
+ approved: false,
+ }.into();
+
+ approval_entry.import_assignment(0, 0, block_tick);
+ approval_entry.import_assignment(0, 1, block_tick);
+
+ approval_entry.import_assignment(1, 2, block_tick + 1);
+ approval_entry.import_assignment(1, 3, block_tick + 1);
+
+ approval_entry.import_assignment(2, 4, block_tick + no_show_duration + 2);
+ approval_entry.import_assignment(2, 5, block_tick + no_show_duration + 2);
+
+ let mut approvals = bitvec![BitOrderLsb0, u8; 0; n_validators];
+ approvals.set(0, true);
+ approvals.set(1, true);
+ // skip 2
+ approvals.set(3, true);
+ // skip 4
+ approvals.set(5, true);
+
+ let tranche_now = 1;
+ assert_eq!(
+ tranches_to_approve(
+ &approval_entry,
+ &approvals,
+ tranche_now,
+ block_tick,
+ no_show_duration,
+ needed_approvals,
+ ),
+ RequiredTranches::Exact {
+ needed: 1,
+ tolerated_missing: 0,
+ next_no_show: Some(block_tick + no_show_duration + 1),
+ },
+ );
+
+ // first no-show covered.
+ let tranche_now = no_show_duration as DelayTranche + 2;
+ assert_eq!(
+ tranches_to_approve(
+ &approval_entry,
+ &approvals,
+ tranche_now,
+ block_tick,
+ no_show_duration,
+ needed_approvals,
+ ),
+ RequiredTranches::Exact {
+ needed: 2,
+ tolerated_missing: 1,
+ next_no_show: Some(block_tick + 2*no_show_duration + 2),
+ },
+ );
+
+ // another no-show in tranche 2.
+ let tranche_now = (no_show_duration * 2) as DelayTranche + 2;
+ assert_eq!(
+ tranches_to_approve(
+ &approval_entry,
+ &approvals,
+ tranche_now,
+ block_tick,
+ no_show_duration,
+ needed_approvals,
+ ),
+ RequiredTranches::Pending {
+ considered: 2,
+ next_no_show: None,
+ maximum_broadcast: 3, // tranche 2 + 1 uncovered no-show.
+ clock_drift: 2 * no_show_duration,
+ },
+ );
+ }
+
+ #[test]
+ fn tranches_to_approve_cover_no_show() {
+ let block_tick = 20;
+ let no_show_duration = 10;
+ let needed_approvals = 4;
+ let n_validators = 8;
+
+ let mut approval_entry: ApprovalEntry = approval_db::v1::ApprovalEntry {
+ tranches: Vec::new(),
+ assignments: bitvec![BitOrderLsb0, u8; 0; n_validators],
+ our_assignment: None,
+ backing_group: GroupIndex(0),
+ approved: false,
+ }.into();
+
+ approval_entry.import_assignment(0, 0, block_tick);
+ approval_entry.import_assignment(0, 1, block_tick);
+
+ approval_entry.import_assignment(1, 2, block_tick + 1);
+ approval_entry.import_assignment(1, 3, block_tick + 1);
+
+ approval_entry.import_assignment(2, 4, block_tick + no_show_duration + 2);
+ approval_entry.import_assignment(2, 5, block_tick + no_show_duration + 2);
+
+ let mut approvals = bitvec![BitOrderLsb0, u8; 0; n_validators];
+ approvals.set(0, true);
+ approvals.set(1, true);
+ // skip 2
+ approvals.set(3, true);
+ approvals.set(4, true);
+ approvals.set(5, true);
+
+ let tranche_now = no_show_duration as DelayTranche + 2;
+ assert_eq!(
+ tranches_to_approve(
+ &approval_entry,
+ &approvals,
+ tranche_now,
+ block_tick,
+ no_show_duration,
+ needed_approvals,
+ ),
+ RequiredTranches::Exact {
+ needed: 2,
+ tolerated_missing: 1,
+ next_no_show: None,
+ },
+ );
+
+ // Even though tranche 2 has 2 validators, it only covers 1 no-show.
+ // to cover a second no-show, we need to take another non-empty tranche.
+
+ approvals.set(0, false);
+
+ assert_eq!(
+ tranches_to_approve(
+ &approval_entry,
+ &approvals,
+ tranche_now,
+ block_tick,
+ no_show_duration,
+ needed_approvals,
+ ),
+ RequiredTranches::Pending {
+ considered: 2,
+ next_no_show: None,
+ maximum_broadcast: 3,
+ clock_drift: no_show_duration,
+ },
+ );
+
+ approval_entry.import_assignment(3, 6, block_tick);
+ approvals.set(6, true);
+
+ let tranche_now = no_show_duration as DelayTranche + 3;
+ assert_eq!(
+ tranches_to_approve(
+ &approval_entry,
+ &approvals,
+ tranche_now,
+ block_tick,
+ no_show_duration,
+ needed_approvals,
+ ),
+ RequiredTranches::Exact {
+ needed: 3,
+ tolerated_missing: 2,
+ next_no_show: None,
+ },
+ );
+ }
+}
+
+#[test]
+fn depth_0_covering_not_treated_as_such() {
+ let state = State {
+ assignments: 0,
+ depth: 0,
+ covered: 0,
+ covering: 10,
+ uncovered: 0,
+ next_no_show: None,
+ };
+
+ assert_eq!(
+ state.output(0, 10, 10, 20),
+ RequiredTranches::Pending {
+ considered: 0,
+ next_no_show: None,
+ maximum_broadcast: DelayTranche::max_value(),
+ clock_drift: 0,
+ },
+ );
+}
+
+#[test]
+fn depth_0_issued_as_exact_even_when_all() {
+ let state = State {
+ assignments: 10,
+ depth: 0,
+ covered: 0,
+ covering: 0,
+ uncovered: 0,
+ next_no_show: None,
+ };
+
+ assert_eq!(
+ state.output(0, 10, 10, 20),
+ RequiredTranches::Exact {
+ needed: 0,
+ tolerated_missing: 0,
+ next_no_show: None,
+ },
+ );
+}
diff --git a/node/core/approval-voting/src/approval_db/mod.rs b/node/core/approval-voting/src/approval_db/mod.rs
new file mode 100644
index 000000000000..8ea9b80e6095
--- /dev/null
+++ b/node/core/approval-voting/src/approval_db/mod.rs
@@ -0,0 +1,33 @@
+// Copyright 2020 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! Approval DB accessors and writers for on-disk persisted approval storage
+//! data.
+//!
+//! We persist data to disk although it is not intended to be used across runs of the
+//! program. This is because under medium to long periods of finality stalling, for whatever
+//! reason that may be, the amount of data we'd need to keep would be potentially too large
+//! for memory.
+//!
+//! With tens or hundreds of parachains, hundreds of validators, and parablocks
+//! in every relay chain block, there can be a humongous amount of information to reference
+//! at any given time.
+//!
+//! As such, we provide a function from this module to clear the database on start-up.
+//! In the future, we may use a temporary DB which doesn't need to be wiped, but for the
+//! time being we share the same DB with the rest of Substrate.
+
+pub mod v1;
diff --git a/node/core/approval-voting/src/aux_schema/mod.rs b/node/core/approval-voting/src/approval_db/v1/mod.rs
similarity index 78%
rename from node/core/approval-voting/src/aux_schema/mod.rs
rename to node/core/approval-voting/src/approval_db/v1/mod.rs
index 26c9cd5ba525..b7f8d09b9aa1 100644
--- a/node/core/approval-voting/src/aux_schema/mod.rs
+++ b/node/core/approval-voting/src/approval_db/v1/mod.rs
@@ -14,27 +14,10 @@
// You should have received a copy of the GNU General Public License
 // along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
-//! Auxiliary DB schema, accessors, and writers for on-disk persisted approval storage
-//! data.
-//!
-//! We persist data to disk although it is not intended to be used across runs of the
-//! program. This is because under medium to long periods of finality stalling, for whatever
-//! reason that may be, the amount of data we'd need to keep would be potentially too large
-//! for memory.
-//!
-//! With tens or hundreds of parachains, hundreds of validators, and parablocks
-//! in every relay chain block, there can be a humongous amount of information to reference
-//! at any given time.
-//!
-//! As such, we provide a function from this module to clear the database on start-up.
-//! In the future, we may use a temporary DB which doesn't need to be wiped, but for the
-//! time being we share the same DB with the rest of Substrate.
-
-// TODO https://github.com/paritytech/polkadot/issues/1975: remove this
-#![allow(unused)]
+//! Version 1 of the DB schema.
use sc_client_api::backend::AuxStore;
-use polkadot_node_primitives::approval::{DelayTranche, RelayVRF};
+use polkadot_node_primitives::approval::{DelayTranche, AssignmentCert};
use polkadot_primitives::v1::{
ValidatorIndex, GroupIndex, CandidateReceipt, SessionIndex, CoreIndex,
BlockNumber, Hash, CandidateHash,
@@ -46,73 +29,95 @@ use std::collections::{BTreeMap, HashMap};
use std::collections::hash_map::Entry;
use bitvec::{vec::BitVec, order::Lsb0 as BitOrderLsb0};
-use super::Tick;
-
#[cfg(test)]
mod tests;
+// slot_duration * 2 + DelayTranche gives the number of delay tranches since the
+// unix epoch.
+#[derive(Encode, Decode, Clone, Copy, Debug, PartialEq)]
+pub struct Tick(u64);
+
+pub type Bitfield = BitVec<BitOrderLsb0, u8>;
+
const STORED_BLOCKS_KEY: &[u8] = b"Approvals_StoredBlocks";
+/// Details pertaining to our assignment on a block.
+#[derive(Encode, Decode, Debug, Clone, PartialEq)]
+pub struct OurAssignment {
+ pub cert: AssignmentCert,
+ pub tranche: DelayTranche,
+ pub validator_index: ValidatorIndex,
+ // Whether the assignment has been triggered already.
+ pub triggered: bool,
+}
+
/// Metadata regarding a specific tranche of assignments for a specific candidate.
-#[derive(Debug, Clone, Encode, Decode, PartialEq)]
-pub(crate) struct TrancheEntry {
- tranche: DelayTranche,
+#[derive(Encode, Decode, Debug, Clone, PartialEq)]
+pub struct TrancheEntry {
+ pub tranche: DelayTranche,
// Assigned validators, and the instant we received their assignment, rounded
// to the nearest tick.
- assignments: Vec<(ValidatorIndex, Tick)>,
+ pub assignments: Vec<(ValidatorIndex, Tick)>,
}
/// Metadata regarding approval of a particular candidate within the context of some
/// particular block.
-#[derive(Debug, Clone, Encode, Decode, PartialEq)]
-pub(crate) struct ApprovalEntry {
- tranches: Vec,
- backing_group: GroupIndex,
- // When the next wakeup for this entry should occur. This is either to
- // check a no-show or to check if we need to broadcast an assignment.
- next_wakeup: Tick,
- our_assignment: Option,
+#[derive(Encode, Decode, Debug, Clone, PartialEq)]
+pub struct ApprovalEntry {
+ pub tranches: Vec<TrancheEntry>,
+ pub backing_group: GroupIndex,
+ pub our_assignment: Option<OurAssignment>,
// `n_validators` bits.
- assignments: BitVec,
- approved: bool,
+ pub assignments: Bitfield,
+ pub approved: bool,
}
/// Metadata regarding approval of a particular candidate.
-#[derive(Debug, Clone, Encode, Decode, PartialEq)]
-pub(crate) struct CandidateEntry {
- candidate: CandidateReceipt,
- session: SessionIndex,
+#[derive(Encode, Decode, Debug, Clone, PartialEq)]
+pub struct CandidateEntry {
+ pub candidate: CandidateReceipt,
+ pub session: SessionIndex,
// Assignments are based on blocks, so we need to track assignments separately
// based on the block we are looking at.
- block_assignments: BTreeMap,
- approvals: BitVec,
+ pub block_assignments: BTreeMap<Hash, ApprovalEntry>,
+ pub approvals: Bitfield,
}
/// Metadata regarding approval of a particular block, by way of approval of the
/// candidates contained within it.
-#[derive(Debug, Clone, Encode, Decode, PartialEq)]
-pub(crate) struct BlockEntry {
- block_hash: Hash,
- session: SessionIndex,
- slot: Slot,
- relay_vrf_story: RelayVRF,
+#[derive(Encode, Decode, Debug, Clone, PartialEq)]
+pub struct BlockEntry {
+ pub block_hash: Hash,
+ pub session: SessionIndex,
+ pub slot: Slot,
+ /// Random bytes derived from the VRF submitted within the block by the block
+ /// author as a credential and used as input to approval assignment criteria.
+ pub relay_vrf_story: [u8; 32],
// The candidates included as-of this block and the index of the core they are
// leaving. Sorted ascending by core index.
- candidates: Vec<(CoreIndex, CandidateHash)>,
+ pub candidates: Vec<(CoreIndex, CandidateHash)>,
// A bitfield where the i'th bit corresponds to the i'th candidate in `candidates`.
// The i'th bit is `true` iff the candidate has been approved in the context of this
// block. The block can be considered approved if the bitfield has all bits set to `true`.
- approved_bitfield: BitVec,
- children: Vec,
+ pub approved_bitfield: Bitfield,
+ pub children: Vec<Hash>,
}
/// A range from earliest..last block number stored within the DB.
-#[derive(Debug, Clone, Encode, Decode, PartialEq)]
-pub(crate) struct StoredBlockRange(BlockNumber, BlockNumber);
+#[derive(Encode, Decode, Debug, Clone, PartialEq)]
+pub struct StoredBlockRange(BlockNumber, BlockNumber);
+
+impl From<crate::Tick> for Tick {
+ fn from(tick: crate::Tick) -> Tick {
+ Tick(tick)
+ }
+}
-// TODO https://github.com/paritytech/polkadot/issues/1975: probably in lib.rs
-#[derive(Debug, Clone, Encode, Decode, PartialEq)]
-pub(crate) struct OurAssignment { }
+impl From<Tick> for crate::Tick {
+ fn from(tick: Tick) -> crate::Tick {
+ tick.0
+ }
+}
/// Canonicalize some particular block, pruning everything before it and
/// pruning any competing branches at the same height.
@@ -351,9 +356,9 @@ fn load_decode(store: &impl AuxStore, key: &[u8])
/// candidate and approval entries.
#[derive(Clone)]
pub(crate) struct NewCandidateInfo {
- candidate: CandidateReceipt,
- backing_group: GroupIndex,
- our_assignment: Option,
+ pub candidate: CandidateReceipt,
+ pub backing_group: GroupIndex,
+ pub our_assignment: Option<OurAssignment>,
}
/// Record a new block entry.
@@ -364,7 +369,8 @@ pub(crate) struct NewCandidateInfo {
/// parent hash.
///
/// Has no effect if there is already an entry for the block or `candidate_info` returns
-/// `None` for any of the candidates referenced by the block entry.
+/// `None` for any of the candidates referenced by the block entry. In these cases,
+/// no information about new candidates will be referred to by this function.
pub(crate) fn add_block_entry(
store: &impl AuxStore,
parent_hash: Hash,
@@ -372,7 +378,7 @@ pub(crate) fn add_block_entry(
entry: BlockEntry,
n_validators: usize,
candidate_info: impl Fn(&CandidateHash) -> Option<NewCandidateInfo>,
-) -> sp_blockchain::Result<()> {
+) -> sp_blockchain::Result<Vec<(CandidateHash, CandidateEntry)>> {
let session = entry.session;
let new_block_range = {
@@ -392,13 +398,15 @@ pub(crate) fn add_block_entry(
let mut blocks_at_height = load_blocks_at_height(store, number)?;
if blocks_at_height.contains(&entry.block_hash) {
// seems we already have a block entry for this block. nothing to do here.
- return Ok(())
+ return Ok(Vec::new())
}
blocks_at_height.push(entry.block_hash);
(blocks_at_height_key(number), blocks_at_height.encode())
};
+ let mut candidate_entries = Vec::with_capacity(entry.candidates.len());
+
let candidate_entry_updates = {
let mut updated_entries = Vec::with_capacity(entry.candidates.len());
for &(_, ref candidate_hash) in &entry.candidates {
@@ -407,7 +415,7 @@ pub(crate) fn add_block_entry(
backing_group,
our_assignment,
} = match candidate_info(candidate_hash) {
- None => return Ok(()),
+ None => return Ok(Vec::new()),
Some(info) => info,
};
@@ -424,7 +432,6 @@ pub(crate) fn add_block_entry(
ApprovalEntry {
tranches: Vec::new(),
backing_group,
- next_wakeup: 0,
our_assignment,
assignments: bitvec::bitvec![BitOrderLsb0, u8; 0; n_validators],
approved: false,
@@ -434,6 +441,8 @@ pub(crate) fn add_block_entry(
updated_entries.push(
(candidate_entry_key(&candidate_hash), candidate_entry.encode())
);
+
+ candidate_entries.push((*candidate_hash, candidate_entry));
}
updated_entries
@@ -466,11 +475,61 @@ pub(crate) fn add_block_entry(
store.insert_aux(&all_keys_and_values, &[])?;
- Ok(())
+ Ok(candidate_entries)
+}
+
+// An atomic transaction of multiple candidate or block entries.
+#[derive(Default)]
+#[must_use = "Transactions do nothing unless written to a DB"]
+pub struct Transaction {
+ block_entries: HashMap<Hash, BlockEntry>,
+ candidate_entries: HashMap<CandidateHash, CandidateEntry>,
+}
+
+impl Transaction {
+ /// Put a block entry in the transaction, overwriting any other with the
+ /// same hash.
+ pub(crate) fn put_block_entry(&mut self, entry: BlockEntry) {
+ let hash = entry.block_hash;
+ let _ = self.block_entries.insert(hash, entry);
+ }
+
+ /// Put a candidate entry in the transaction, overwriting any other with the
+ /// same hash.
+ pub(crate) fn put_candidate_entry(&mut self, hash: CandidateHash, entry: CandidateEntry) {
+ let _ = self.candidate_entries.insert(hash, entry);
+ }
+
+ /// Write the contents of the transaction, atomically, to the DB.
+ pub(crate) fn write(self, db: &impl AuxStore) -> sp_blockchain::Result<()> {
+ if self.block_entries.is_empty() && self.candidate_entries.is_empty() {
+ return Ok(())
+ }
+
+ let blocks: Vec<_> = self.block_entries.into_iter().map(|(hash, entry)| {
+ let k = block_entry_key(&hash);
+ let v = entry.encode();
+
+ (k, v)
+ }).collect();
+
+ let candidates: Vec<_> = self.candidate_entries.into_iter().map(|(hash, entry)| {
+ let k = candidate_entry_key(&hash);
+ let v = entry.encode();
+
+ (k, v)
+ }).collect();
+
+ let kv = blocks.iter().map(|(k, v)| (&k[..], &v[..]))
+ .chain(candidates.iter().map(|(k, v)| (&k[..], &v[..])))
+ .collect::<Vec<_>>();
+
+ db.insert_aux(&kv, &[])
+ }
}
/// Load the stored-blocks key from the state.
-pub(crate) fn load_stored_blocks(store: &impl AuxStore)
+fn load_stored_blocks(store: &impl AuxStore)
-> sp_blockchain::Result<Option<StoredBlockRange>>
{
load_decode(store, STORED_BLOCKS_KEY)
diff --git a/node/core/approval-voting/src/aux_schema/tests.rs b/node/core/approval-voting/src/approval_db/v1/tests.rs
similarity index 96%
rename from node/core/approval-voting/src/aux_schema/tests.rs
rename to node/core/approval-voting/src/approval_db/v1/tests.rs
index 4f7861ee6a8c..de7f4595ab79 100644
--- a/node/core/approval-voting/src/aux_schema/tests.rs
+++ b/node/core/approval-voting/src/approval_db/v1/tests.rs
@@ -17,8 +17,8 @@
//! Tests for the aux-schema of approval voting.
use super::*;
-use std::cell::RefCell;
use polkadot_primitives::v1::Id as ParaId;
+use std::cell::RefCell;
#[derive(Default)]
struct TestStore {
@@ -49,28 +49,28 @@ impl AuxStore for TestStore {
}
impl TestStore {
- fn write_stored_blocks(&self, range: StoredBlockRange) {
+ pub(crate) fn write_stored_blocks(&self, range: StoredBlockRange) {
self.inner.borrow_mut().insert(
STORED_BLOCKS_KEY.to_vec(),
range.encode(),
);
}
- fn write_blocks_at_height(&self, height: BlockNumber, blocks: &[Hash]) {
+ pub(crate) fn write_blocks_at_height(&self, height: BlockNumber, blocks: &[Hash]) {
self.inner.borrow_mut().insert(
blocks_at_height_key(height).to_vec(),
blocks.encode(),
);
}
- fn write_block_entry(&self, block_hash: &Hash, entry: &BlockEntry) {
+ pub(crate) fn write_block_entry(&self, block_hash: &Hash, entry: &BlockEntry) {
self.inner.borrow_mut().insert(
block_entry_key(block_hash).to_vec(),
entry.encode(),
);
}
- fn write_candidate_entry(&self, candidate_hash: &CandidateHash, entry: &CandidateEntry) {
+ pub(crate) fn write_candidate_entry(&self, candidate_hash: &CandidateHash, entry: &CandidateEntry) {
self.inner.borrow_mut().insert(
candidate_entry_key(candidate_hash).to_vec(),
entry.encode(),
@@ -89,8 +89,8 @@ fn make_block_entry(
BlockEntry {
block_hash,
session: 1,
- slot: 1.into(),
- relay_vrf_story: RelayVRF([0u8; 32]),
+ slot: Slot::from(1),
+ relay_vrf_story: [0u8; 32],
approved_bitfield: make_bitvec(candidates.len()),
candidates,
children: Vec::new(),
@@ -129,7 +129,6 @@ fn read_write() {
(hash_a, ApprovalEntry {
tranches: Vec::new(),
backing_group: GroupIndex(1),
- next_wakeup: 1000,
our_assignment: None,
assignments: Default::default(),
approved: false,
@@ -156,7 +155,7 @@ fn read_write() {
];
let delete_keys: Vec<_> = delete_keys.iter().map(|k| &k[..]).collect();
- store.insert_aux(&[], &delete_keys);
+ store.insert_aux(&[], &delete_keys).unwrap();
assert!(load_stored_blocks(&store).unwrap().is_none());
assert!(load_blocks_at_height(&store, 1).unwrap().is_empty());
@@ -296,7 +295,6 @@ fn clear_works() {
(hash_a, ApprovalEntry {
tranches: Vec::new(),
backing_group: GroupIndex(1),
- next_wakeup: 1000,
our_assignment: None,
assignments: Default::default(),
approved: false,
@@ -331,7 +329,7 @@ fn canonicalize_works() {
// -> B1 -> C1 -> D1
// A -> B2 -> C2 -> D2
//
- // We'll canonicalize C1. Everything except D1 should disappear.
+ // We'll canonicalize C1. Everything except D1 should disappear.
//
// Candidates:
// Cand1 in B2
diff --git a/node/core/approval-voting/src/criteria.rs b/node/core/approval-voting/src/criteria.rs
new file mode 100644
index 000000000000..706af13b27b2
--- /dev/null
+++ b/node/core/approval-voting/src/criteria.rs
@@ -0,0 +1,782 @@
+// Copyright 2020 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <https://www.gnu.org/licenses/>.
+
+//! Assignment criteria VRF generation and checking.
+
+use polkadot_node_primitives::approval::{
+ self as approval_types, AssignmentCert, AssignmentCertKind, DelayTranche, RelayVRFStory,
+};
+use polkadot_primitives::v1::{
+ CoreIndex, ValidatorIndex, SessionInfo, AssignmentPair, AssignmentId, GroupIndex,
+};
+use sc_keystore::LocalKeystore;
+use parity_scale_codec::{Encode, Decode};
+use sp_application_crypto::Public;
+
+use merlin::Transcript;
+use schnorrkel::vrf::VRFInOut;
+
+use std::collections::HashMap;
+use std::collections::hash_map::Entry;
+
+use super::LOG_TARGET;
+
+/// Details pertaining to our assignment on a block.
+#[derive(Debug, Clone, Encode, Decode, PartialEq)]
+pub struct OurAssignment {
+ cert: AssignmentCert,
+ tranche: DelayTranche,
+ validator_index: ValidatorIndex,
+ // Whether the assignment has been triggered already.
+ triggered: bool,
+}
+
+impl OurAssignment {
+ pub(crate) fn cert(&self) -> &AssignmentCert {
+ &self.cert
+ }
+
+ pub(crate) fn tranche(&self) -> DelayTranche {
+ self.tranche
+ }
+
+ pub(crate) fn validator_index(&self) -> ValidatorIndex {
+ self.validator_index
+ }
+
+ pub(crate) fn triggered(&self) -> bool {
+ self.triggered
+ }
+
+ pub(crate) fn mark_triggered(&mut self) {
+ self.triggered = true;
+ }
+}
+
+impl From for OurAssignment {
+ fn from(entry: crate::approval_db::v1::OurAssignment) -> Self {
+ OurAssignment {
+ cert: entry.cert,
+ tranche: entry.tranche,
+ validator_index: entry.validator_index,
+ triggered: entry.triggered,
+ }
+ }
+}
+
+impl From for crate::approval_db::v1::OurAssignment {
+ fn from(entry: OurAssignment) -> Self {
+ Self {
+ cert: entry.cert,
+ tranche: entry.tranche,
+ validator_index: entry.validator_index,
+ triggered: entry.triggered,
+ }
+ }
+}
+
+fn relay_vrf_modulo_transcript(
+ relay_vrf_story: RelayVRFStory,
+ sample: u32,
+) -> Transcript {
+ // combine the relay VRF story with a sample number.
+ let mut t = Transcript::new(approval_types::RELAY_VRF_MODULO_CONTEXT);
+ t.append_message(b"RC-VRF", &relay_vrf_story.0);
+ sample.using_encoded(|s| t.append_message(b"sample", s));
+
+ t
+}
+
+fn relay_vrf_modulo_core(
+ vrf_in_out: &VRFInOut,
+ n_cores: u32,
+) -> CoreIndex {
+ let bytes: [u8; 4] = vrf_in_out.make_bytes(approval_types::CORE_RANDOMNESS_CONTEXT);
+
+ // interpret as little-endian u32.
+ let random_core = u32::from_le_bytes(bytes) % n_cores;
+ CoreIndex(random_core)
+}
+
+fn relay_vrf_delay_transcript(
+ relay_vrf_story: RelayVRFStory,
+ core_index: CoreIndex,
+) -> Transcript {
+ let mut t = Transcript::new(approval_types::RELAY_VRF_DELAY_CONTEXT);
+ t.append_message(b"RC-VRF", &relay_vrf_story.0);
+ core_index.0.using_encoded(|s| t.append_message(b"core", s));
+ t
+}
+
+fn relay_vrf_delay_tranche(
+ vrf_in_out: &VRFInOut,
+ num_delay_tranches: u32,
+ zeroth_delay_tranche_width: u32,
+) -> DelayTranche {
+ let bytes: [u8; 4] = vrf_in_out.make_bytes(approval_types::TRANCHE_RANDOMNESS_CONTEXT);
+
+ // interpret as little-endian u32 and reduce by the number of tranches.
+ let wide_tranche = u32::from_le_bytes(bytes) % (num_delay_tranches + zeroth_delay_tranche_width);
+
+ // Consolidate early results to tranche zero so tranche zero is extra wide.
+ wide_tranche.saturating_sub(zeroth_delay_tranche_width)
+}
+
+fn assigned_core_transcript(core_index: CoreIndex) -> Transcript {
+ let mut t = Transcript::new(approval_types::ASSIGNED_CORE_CONTEXT);
+ core_index.0.using_encoded(|s| t.append_message(b"core", s));
+ t
+}
+
+/// Information about the world assignments are being produced in.
+#[derive(Clone)]
+pub(crate) struct Config {
+ /// The assignment public keys for validators.
+ assignment_keys: Vec<AssignmentId>,
+ /// The groups of validators assigned to each core.
+ validator_groups: Vec<Vec<ValidatorIndex>>,
+ /// The number of availability cores used by the protocol during this session.
+ n_cores: u32,
+ /// The zeroth delay tranche width.
+ zeroth_delay_tranche_width: u32,
+ /// The number of samples we do of relay_vrf_modulo.
+ relay_vrf_modulo_samples: u32,
+ /// The number of delay tranches in total.
+ n_delay_tranches: u32,
+}
+
+impl<'a> From<&'a SessionInfo> for Config {
+ fn from(s: &'a SessionInfo) -> Self {
+ Config {
+ assignment_keys: s.assignment_keys.clone(),
+ validator_groups: s.validator_groups.clone(),
+ n_cores: s.n_cores.clone(),
+ zeroth_delay_tranche_width: s.zeroth_delay_tranche_width.clone(),
+ relay_vrf_modulo_samples: s.relay_vrf_modulo_samples.clone(),
+ n_delay_tranches: s.n_delay_tranches.clone(),
+ }
+ }
+}
+
+/// A trait for producing and checking assignments. Used to mock.
+pub(crate) trait AssignmentCriteria {
+ fn compute_assignments(
+ &self,
+ keystore: &LocalKeystore,
+ relay_vrf_story: RelayVRFStory,
+ config: &Config,
+ leaving_cores: Vec<(CoreIndex, GroupIndex)>,
+ ) -> HashMap<CoreIndex, OurAssignment>;
+
+ fn check_assignment_cert(
+ &self,
+ claimed_core_index: CoreIndex,
+ validator_index: ValidatorIndex,
+ config: &Config,
+ relay_vrf_story: RelayVRFStory,
+ assignment: &AssignmentCert,
+ backing_group: GroupIndex,
+ ) -> Result<DelayTranche, InvalidAssignment>;
+}
+
+pub(crate) struct RealAssignmentCriteria;
+
+impl AssignmentCriteria for RealAssignmentCriteria {
+ fn compute_assignments(
+ &self,
+ keystore: &LocalKeystore,
+ relay_vrf_story: RelayVRFStory,
+ config: &Config,
+ leaving_cores: Vec<(CoreIndex, GroupIndex)>,
+ ) -> HashMap<CoreIndex, OurAssignment> {
+ compute_assignments(
+ keystore,
+ relay_vrf_story,
+ config,
+ leaving_cores,
+ )
+ }
+
+ fn check_assignment_cert(
+ &self,
+ claimed_core_index: CoreIndex,
+ validator_index: ValidatorIndex,
+ config: &Config,
+ relay_vrf_story: RelayVRFStory,
+ assignment: &AssignmentCert,
+ backing_group: GroupIndex,
+ ) -> Result<DelayTranche, InvalidAssignment> {
+ check_assignment_cert(
+ claimed_core_index,
+ validator_index,
+ config,
+ relay_vrf_story,
+ assignment,
+ backing_group,
+ )
+ }
+}
+
+/// Compute the assignments for a given block. Returns a map containing all assignments to cores in
+/// the block. If more than one assignment targets the given core, only the earliest assignment is kept.
+///
+/// The `leaving_cores` parameter indicates all cores within the block where a candidate was included,
+/// as well as the group index backing those.
+///
+/// The current description of the protocol assigns every validator to check every core. But at different times.
+/// The idea is that most assignments are never triggered and fall by the wayside.
+///
+/// This will not assign to anything the local validator was part of the backing group for.
+pub(crate) fn compute_assignments(
+ keystore: &LocalKeystore,
+ relay_vrf_story: RelayVRFStory,
+ config: &Config,
+ leaving_cores: impl IntoIterator<Item = (CoreIndex, GroupIndex)> + Clone,
+) -> HashMap<CoreIndex, OurAssignment> {
+ let (index, assignments_key): (ValidatorIndex, AssignmentPair) = {
+ let key = config.assignment_keys.iter().enumerate()
+ .filter_map(|(i, p)| match keystore.key_pair(p) {
+ Ok(pair) => Some((i as ValidatorIndex, pair)),
+ Err(sc_keystore::Error::PairNotFound(_)) => None,
+ Err(e) => {
+ tracing::warn!(target: LOG_TARGET, "Encountered keystore error: {:?}", e);
+ None
+ }
+ })
+ .next();
+
+ match key {
+ None => return Default::default(),
+ Some(k) => k,
+ }
+ };
+
+ // Ignore any cores where the assigned group is our own.
+ let leaving_cores = leaving_cores.into_iter()
+ .filter(|&(_, ref g)| !is_in_backing_group(&config.validator_groups, index, *g))
+ .map(|(c, _)| c)
+ .collect::<Vec<_>>();
+
+ let assignments_key: &sp_application_crypto::sr25519::Pair = assignments_key.as_ref();
+ let assignments_key: &schnorrkel::Keypair = assignments_key.as_ref();
+
+ let mut assignments = HashMap::new();
+
+ // First run `RelayVRFModulo` for each sample.
+ compute_relay_vrf_modulo_assignments(
+ &assignments_key,
+ index,
+ config,
+ relay_vrf_story.clone(),
+ leaving_cores.iter().cloned(),
+ &mut assignments,
+ );
+
+ // Then run `RelayVRFDelay` once for the whole block.
+ compute_relay_vrf_delay_assignments(
+ &assignments_key,
+ index,
+ config,
+ relay_vrf_story,
+ leaving_cores,
+ &mut assignments,
+ );
+
+ assignments
+}
+
+fn compute_relay_vrf_modulo_assignments(
+ assignments_key: &schnorrkel::Keypair,
+ validator_index: ValidatorIndex,
+ config: &Config,
+ relay_vrf_story: RelayVRFStory,
+ leaving_cores: impl IntoIterator<Item = CoreIndex> + Clone,
+ assignments: &mut HashMap<CoreIndex, OurAssignment>,
+) {
+ for rvm_sample in 0..config.relay_vrf_modulo_samples {
+ let mut core = Default::default();
+
+ let maybe_assignment = {
+ // Extra scope to ensure borrowing instead of moving core
+ // into closure.
+ let core = &mut core;
+ assignments_key.vrf_sign_extra_after_check(
+ relay_vrf_modulo_transcript(relay_vrf_story.clone(), rvm_sample),
+ |vrf_in_out| {
+ *core = relay_vrf_modulo_core(&vrf_in_out, config.n_cores);
+ if leaving_cores.clone().into_iter().any(|c| c == *core) {
+ Some(assigned_core_transcript(*core))
+ } else {
+ None
+ }
+ }
+ )
+ };
+
+ if let Some((vrf_in_out, vrf_proof, _)) = maybe_assignment {
+ // Sanity: `core` is always initialized to non-default here, as the closure above
+ // has been executed.
+ let cert = AssignmentCert {
+ kind: AssignmentCertKind::RelayVRFModulo { sample: rvm_sample },
+ vrf: (approval_types::VRFOutput(vrf_in_out.to_output()), approval_types::VRFProof(vrf_proof)),
+ };
+
+ // All assignments of type RelayVRFModulo have tranche 0.
+ assignments.entry(core).or_insert(OurAssignment {
+ cert,
+ tranche: 0,
+ validator_index,
+ triggered: false,
+ });
+ }
+ }
+}
+
+fn compute_relay_vrf_delay_assignments(
+ assignments_key: &schnorrkel::Keypair,
+ validator_index: ValidatorIndex,
+ config: &Config,
+ relay_vrf_story: RelayVRFStory,
+ leaving_cores: impl IntoIterator<Item = CoreIndex>,
+ assignments: &mut HashMap<CoreIndex, OurAssignment>,
+) {
+ for core in leaving_cores {
+ let (vrf_in_out, vrf_proof, _) = assignments_key.vrf_sign(
+ relay_vrf_delay_transcript(relay_vrf_story.clone(), core),
+ );
+
+ let tranche = relay_vrf_delay_tranche(
+ &vrf_in_out,
+ config.n_delay_tranches,
+ config.zeroth_delay_tranche_width,
+ );
+
+ let cert = AssignmentCert {
+ kind: AssignmentCertKind::RelayVRFDelay { core_index: core },
+ vrf: (approval_types::VRFOutput(vrf_in_out.to_output()), approval_types::VRFProof(vrf_proof)),
+ };
+
+ let our_assignment = OurAssignment {
+ cert,
+ tranche,
+ validator_index,
+ triggered: false,
+ };
+
+ match assignments.entry(core) {
+ Entry::Vacant(e) => { let _ = e.insert(our_assignment); }
+ Entry::Occupied(mut e) => if e.get().tranche > our_assignment.tranche {
+ e.insert(our_assignment);
+ },
+ }
+ }
+}
+
+/// Assignment invalid.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct InvalidAssignment;
+
+impl std::fmt::Display for InvalidAssignment {
+ fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
+ write!(f, "Invalid Assignment")
+ }
+}
+
+impl std::error::Error for InvalidAssignment { }
+
+/// Checks the crypto of an assignment cert. Failure conditions:
+/// * Validator index out of bounds
+/// * VRF signature check fails
+/// * VRF output doesn't match assigned core
+/// * Core is not covered by extra data in signature
+/// * Core index out of bounds
+/// * Sample is out of bounds
+/// * Validator is present in backing group.
+///
+/// This function does not check whether the core is actually a valid assignment or not. That should be done
+/// outside of the scope of this function.
+pub(crate) fn check_assignment_cert(
+ claimed_core_index: CoreIndex,
+ validator_index: ValidatorIndex,
+ config: &Config,
+ relay_vrf_story: RelayVRFStory,
+ assignment: &AssignmentCert,
+ backing_group: GroupIndex,
+) -> Result<DelayTranche, InvalidAssignment> {
+ let validator_public = config.assignment_keys
+ .get(validator_index as usize)
+ .ok_or(InvalidAssignment)?;
+
+ let public = schnorrkel::PublicKey::from_bytes(validator_public.as_slice())
+ .map_err(|_| InvalidAssignment)?;
+
+ if claimed_core_index.0 >= config.n_cores {
+ return Err(InvalidAssignment);
+ }
+
+ // Check that the validator was not part of the backing group
+ // and not already assigned.
+ let is_in_backing = is_in_backing_group(
+ &config.validator_groups,
+ validator_index,
+ backing_group,
+ );
+
+ if is_in_backing {
+ return Err(InvalidAssignment);
+ }
+
+ let &(ref vrf_output, ref vrf_proof) = &assignment.vrf;
+ match assignment.kind {
+ AssignmentCertKind::RelayVRFModulo { sample } => {
+ if sample >= config.relay_vrf_modulo_samples {
+ return Err(InvalidAssignment);
+ }
+
+ let (vrf_in_out, _) = public.vrf_verify_extra(
+ relay_vrf_modulo_transcript(relay_vrf_story, sample),
+ &vrf_output.0,
+ &vrf_proof.0,
+ assigned_core_transcript(claimed_core_index),
+ ).map_err(|_| InvalidAssignment)?;
+
+ // ensure that the `vrf_in_out` actually gives us the claimed core.
+ if relay_vrf_modulo_core(&vrf_in_out, config.n_cores) == claimed_core_index {
+ Ok(0)
+ } else {
+ Err(InvalidAssignment)
+ }
+ }
+ AssignmentCertKind::RelayVRFDelay { core_index } => {
+ if core_index != claimed_core_index {
+ return Err(InvalidAssignment);
+ }
+
+ let (vrf_in_out, _) = public.vrf_verify(
+ relay_vrf_delay_transcript(relay_vrf_story, core_index),
+ &vrf_output.0,
+ &vrf_proof.0,
+ ).map_err(|_| InvalidAssignment)?;
+
+ Ok(relay_vrf_delay_tranche(
+ &vrf_in_out,
+ config.n_delay_tranches,
+ config.zeroth_delay_tranche_width,
+ ))
+ }
+ }
+}
+
+fn is_in_backing_group(
+ validator_groups: &[Vec<ValidatorIndex>],
+ validator: ValidatorIndex,
+ group: GroupIndex,
+) -> bool {
+ validator_groups.get(group.0 as usize).map_or(false, |g| g.contains(&validator))
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use sp_keystore::CryptoStore;
+ use sp_keyring::sr25519::Keyring as Sr25519Keyring;
+ use sp_application_crypto::sr25519;
+ use sp_core::crypto::Pair as PairT;
+ use polkadot_primitives::v1::ASSIGNMENT_KEY_TYPE_ID;
+ use polkadot_node_primitives::approval::{VRFOutput, VRFProof};
+
+ // sets up a keystore with the given keyring accounts.
+ async fn make_keystore(accounts: &[Sr25519Keyring]) -> LocalKeystore {
+ let store = LocalKeystore::in_memory();
+
+ for s in accounts.iter().copied().map(|k| k.to_seed()) {
+ store.sr25519_generate_new(
+ ASSIGNMENT_KEY_TYPE_ID,
+ Some(s.as_str()),
+ ).await.unwrap();
+ }
+
+ store
+ }
+
+ fn assignment_keys(accounts: &[Sr25519Keyring]) -> Vec<AssignmentId> {
+ assignment_keys_plus_random(accounts, 0)
+ }
+
+ fn assignment_keys_plus_random(accounts: &[Sr25519Keyring], random: usize) -> Vec<AssignmentId> {
+ let gen_random = (0..random).map(|_|
+ AssignmentId::from(sr25519::Pair::generate().0.public())
+ );
+
+ accounts.iter()
+ .map(|k| AssignmentId::from(k.public()))
+ .chain(gen_random)
+ .collect()
+ }
+
+ fn basic_groups(n_validators: usize, n_groups: usize) -> Vec<Vec<ValidatorIndex>> {
+ let size = n_validators / n_groups;
+ let big_groups = n_validators % n_groups;
+ let scraps = n_groups * size;
+
+ (0..n_groups).map(|i| {
+ (i * size .. (i + 1) *size)
+ .chain(if i < big_groups { Some(scraps + i) } else { None })
+ .map(|j| j as ValidatorIndex)
+ .collect::<Vec<_>>()
+ }).collect()
+ }
+
+ // used for generating assignments where the validity of the VRF doesn't matter.
+ fn garbage_vrf() -> (VRFOutput, VRFProof) {
+ let key = Sr25519Keyring::Alice.pair();
+ let key: &schnorrkel::Keypair = key.as_ref();
+
+ let (o, p, _) = key.vrf_sign(Transcript::new(b"test-garbage"));
+ (VRFOutput(o.to_output()), VRFProof(p))
+ }
+
+ #[test]
+ fn assignments_produced_for_non_backing() {
+ let keystore = futures::executor::block_on(
+ make_keystore(&[Sr25519Keyring::Alice])
+ );
+
+ let relay_vrf_story = RelayVRFStory([42u8; 32]);
+ let assignments = compute_assignments(
+ &keystore,
+ relay_vrf_story,
+ &Config {
+ assignment_keys: assignment_keys(&[
+ Sr25519Keyring::Alice,
+ Sr25519Keyring::Bob,
+ Sr25519Keyring::Charlie,
+ ]),
+ validator_groups: vec![vec![0], vec![1, 2]],
+ n_cores: 2,
+ zeroth_delay_tranche_width: 10,
+ relay_vrf_modulo_samples: 3,
+ n_delay_tranches: 40,
+ },
+ vec![(CoreIndex(0), GroupIndex(1)), (CoreIndex(1), GroupIndex(0))],
+ );
+
+ // Note that alice is in group 0, which was the backing group for core 1.
+ // Alice should have self-assigned to check core 0 but not 1.
+ assert_eq!(assignments.len(), 1);
+ assert!(assignments.get(&CoreIndex(0)).is_some());
+ }
+
+ #[test]
+ fn assign_to_nonzero_core() {
+ let keystore = futures::executor::block_on(
+ make_keystore(&[Sr25519Keyring::Alice])
+ );
+
+ let relay_vrf_story = RelayVRFStory([42u8; 32]);
+ let assignments = compute_assignments(
+ &keystore,
+ relay_vrf_story,
+ &Config {
+ assignment_keys: assignment_keys(&[
+ Sr25519Keyring::Alice,
+ Sr25519Keyring::Bob,
+ Sr25519Keyring::Charlie,
+ ]),
+ validator_groups: vec![vec![0], vec![1, 2]],
+ n_cores: 2,
+ zeroth_delay_tranche_width: 10,
+ relay_vrf_modulo_samples: 3,
+ n_delay_tranches: 40,
+ },
+ vec![(CoreIndex(0), GroupIndex(0)), (CoreIndex(1), GroupIndex(1))],
+ );
+
+ assert_eq!(assignments.len(), 1);
+ assert!(assignments.get(&CoreIndex(1)).is_some());
+ }
+
+ struct MutatedAssignment {
+ core: CoreIndex,
+ cert: AssignmentCert,
+ group: GroupIndex,
+ own_group: GroupIndex,
+ val_index: ValidatorIndex,
+ config: Config,
+ }
+
+ // This fails if the closure requests to skip everything.
+ fn check_mutated_assignments(
+ n_validators: usize,
+ n_cores: usize,
+ rotation_offset: usize,
+ f: impl Fn(&mut MutatedAssignment) -> Option<bool>, // None = skip
+ ) {
+ let keystore = futures::executor::block_on(
+ make_keystore(&[Sr25519Keyring::Alice])
+ );
+
+ let group_for_core = |i| GroupIndex(((i + rotation_offset) % n_cores) as _);
+
+ let config = Config {
+ assignment_keys: assignment_keys_plus_random(&[Sr25519Keyring::Alice], n_validators - 1),
+ validator_groups: basic_groups(n_validators, n_cores),
+ n_cores: n_cores as u32,
+ zeroth_delay_tranche_width: 10,
+ relay_vrf_modulo_samples: 3,
+ n_delay_tranches: 40,
+ };
+
+ let relay_vrf_story = RelayVRFStory([42u8; 32]);
+ let assignments = compute_assignments(
+ &keystore,
+ relay_vrf_story.clone(),
+ &config,
+ (0..n_cores)
+ .map(|i| (
+ CoreIndex(i as u32),
+ group_for_core(i),
+ ))
+ .collect::<Vec<_>>(),
+ );
+
+ let mut counted = 0;
+ for (core, assignment) in assignments {
+ let mut mutated = MutatedAssignment {
+ core,
+ group: group_for_core(core.0 as _),
+ cert: assignment.cert,
+ own_group: GroupIndex(0),
+ val_index: 0,
+ config: config.clone(),
+ };
+
+ let expected = match f(&mut mutated) {
+ None => continue,
+ Some(e) => e,
+ };
+
+ counted += 1;
+
+ let is_good = check_assignment_cert(
+ mutated.core,
+ mutated.val_index,
+ &mutated.config,
+ relay_vrf_story.clone(),
+ &mutated.cert,
+ mutated.group,
+ ).is_ok();
+
+ assert_eq!(expected, is_good)
+ }
+
+ assert!(counted > 0);
+ }
+
+ #[test]
+ fn computed_assignments_pass_checks() {
+ check_mutated_assignments(200, 100, 25, |_| Some(true));
+ }
+
+ #[test]
+ fn check_rejects_claimed_core_out_of_bounds() {
+ check_mutated_assignments(200, 100, 25, |m| {
+ m.core.0 += 100;
+ Some(false)
+ });
+ }
+
+ #[test]
+ fn check_rejects_in_backing_group() {
+ check_mutated_assignments(200, 100, 25, |m| {
+ m.group = m.own_group;
+ Some(false)
+ });
+ }
+
+ #[test]
+ fn check_rejects_nonexistent_key() {
+ check_mutated_assignments(200, 100, 25, |m| {
+ m.val_index += 200;
+ Some(false)
+ });
+ }
+
+ #[test]
+ fn check_rejects_delay_bad_vrf() {
+ check_mutated_assignments(40, 10, 8, |m| {
+ match m.cert.kind.clone() {
+ AssignmentCertKind::RelayVRFDelay { .. } => {
+ m.cert.vrf = garbage_vrf();
+ Some(false)
+ }
+ _ => None, // skip everything else.
+ }
+ });
+ }
+
+ #[test]
+ fn check_rejects_modulo_bad_vrf() {
+ check_mutated_assignments(200, 100, 25, |m| {
+ match m.cert.kind.clone() {
+ AssignmentCertKind::RelayVRFModulo { .. } => {
+ m.cert.vrf = garbage_vrf();
+ Some(false)
+ }
+ _ => None, // skip everything else.
+ }
+ });
+ }
+
+ #[test]
+ fn check_rejects_modulo_sample_out_of_bounds() {
+ check_mutated_assignments(200, 100, 25, |m| {
+ match m.cert.kind.clone() {
+ AssignmentCertKind::RelayVRFModulo { sample } => {
+ m.config.relay_vrf_modulo_samples = sample;
+ Some(false)
+ }
+ _ => None, // skip everything else.
+ }
+ });
+ }
+
+ #[test]
+ fn check_rejects_delay_claimed_core_wrong() {
+ check_mutated_assignments(200, 100, 25, |m| {
+ match m.cert.kind.clone() {
+ AssignmentCertKind::RelayVRFDelay { .. } => {
+ m.core = CoreIndex((m.core.0 + 1) % 100);
+ Some(false)
+ }
+ _ => None, // skip everything else.
+ }
+ });
+ }
+
+ #[test]
+ fn check_rejects_modulo_core_wrong() {
+ check_mutated_assignments(200, 100, 25, |m| {
+ match m.cert.kind.clone() {
+ AssignmentCertKind::RelayVRFModulo { .. } => {
+ m.core = CoreIndex((m.core.0 + 1) % 100);
+ Some(false)
+ }
+ _ => None, // skip everything else.
+ }
+ });
+ }
+}
diff --git a/node/core/approval-voting/src/import.rs b/node/core/approval-voting/src/import.rs
new file mode 100644
index 000000000000..b7162a939808
--- /dev/null
+++ b/node/core/approval-voting/src/import.rs
@@ -0,0 +1,1749 @@
+// Copyright 2020 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! Block import logic for the approval voting subsystem.
+//!
+//! There are two major concerns when handling block import notifications.
+//! * Determining all new blocks.
+//! * Handling session changes
+//!
+//! When receiving a block import notification from the overseer, the
+//! approval voting subsystem needs to account for the fact that there
+//! may have been blocks missed by the notification. It needs to iterate
+//! the ancestry of the block notification back to either the last finalized
+//! block or a block that is already accounted for within the DB.
+//!
+//! We maintain a rolling window of session indices. This starts as empty
+
+use polkadot_subsystem::{
+ messages::{
+ RuntimeApiMessage, RuntimeApiRequest, ChainApiMessage, ApprovalDistributionMessage,
+ },
+ SubsystemContext, SubsystemError, SubsystemResult,
+};
+use polkadot_primitives::v1::{
+ Hash, SessionIndex, SessionInfo, CandidateEvent, Header, CandidateHash,
+ CandidateReceipt, CoreIndex, GroupIndex, BlockNumber,
+};
+use polkadot_node_primitives::approval::{
+ self as approval_types, BlockApprovalMeta, RelayVRFStory,
+};
+use sc_keystore::LocalKeystore;
+use sc_client_api::backend::AuxStore;
+use sp_consensus_slots::Slot;
+
+use futures::prelude::*;
+use futures::channel::oneshot;
+use bitvec::order::Lsb0 as BitOrderLsb0;
+
+use std::collections::HashMap;
+
+use crate::approval_db;
+use crate::persisted_entries::CandidateEntry;
+use crate::criteria::{AssignmentCriteria, OurAssignment};
+use crate::time::{slot_number_to_tick, Tick};
+
+use super::{APPROVAL_SESSIONS, LOG_TARGET, State, DBReader};
+
+/// A rolling window of sessions.
+#[derive(Default)]
+pub struct RollingSessionWindow {
+ pub earliest_session: Option<SessionIndex>,
+ pub session_info: Vec<SessionInfo>,
+}
+
+impl RollingSessionWindow {
+ pub fn session_info(&self, index: SessionIndex) -> Option<&SessionInfo> {
+ self.earliest_session.and_then(|earliest| {
+ if index < earliest {
+ None
+ } else {
+ self.session_info.get((index - earliest) as usize)
+ }
+ })
+
+ }
+
+ pub fn latest_session(&self) -> Option<SessionIndex> {
+ self.earliest_session
+ .map(|earliest| earliest + (self.session_info.len() as SessionIndex).saturating_sub(1))
+ }
+}
+
+// Given a new chain-head hash, this determines the hashes of all new blocks we should track
+// metadata for, given this head. The list will typically include the `head` hash provided unless
+// that block is already known, in which case the list should be empty. This is guaranteed to be
+// a subset of the ancestry of `head`, as well as `head`, starting from `head` and moving
+// backwards.
+//
+// This returns the entire ancestry up to the last finalized block's height or the last item we
+// have in the DB. This may be somewhat expensive when first recovering from major sync.
+async fn determine_new_blocks(
+ ctx: &mut impl SubsystemContext,
+ db: &impl DBReader,
+ head: Hash,
+ header: &Header,
+ finalized_number: BlockNumber,
+) -> SubsystemResult<Vec<(Hash, Header)>> {
+ const ANCESTRY_STEP: usize = 4;
+
+ // Early exit if the block is in the DB or too early.
+ {
+ let already_known = db.load_block_entry(&head)?
+ .is_some();
+
+ let before_relevant = header.number <= finalized_number;
+
+ if already_known || before_relevant {
+ return Ok(Vec::new());
+ }
+ }
+
+ let mut ancestry = vec![(head, header.clone())];
+
+ // Early exit if the parent hash is in the DB.
+ if db.load_block_entry(&header.parent_hash)?
+ .is_some()
+ {
+ return Ok(ancestry);
+ }
+
+ loop {
+ let &(ref last_hash, ref last_header) = ancestry.last()
+ .expect("ancestry has length 1 at initialization and is only added to; qed");
+
+ // If we iterated back to genesis, which can happen at the beginning of chains.
+ if last_header.number <= 1 {
+ break
+ }
+
+ let (tx, rx) = oneshot::channel();
+ ctx.send_message(ChainApiMessage::Ancestors {
+ hash: *last_hash,
+ k: ANCESTRY_STEP,
+ response_channel: tx,
+ }.into()).await;
+
+ // Continue past these errors.
+ let batch_hashes = match rx.await {
+ Err(_) | Ok(Err(_)) => break,
+ Ok(Ok(ancestors)) => ancestors,
+ };
+
+ let batch_headers = {
+ let (batch_senders, batch_receivers) = (0..batch_hashes.len())
+ .map(|_| oneshot::channel())
+ .unzip::<_, _, Vec<_>, Vec<_>>();
+
+ for (hash, sender) in batch_hashes.iter().cloned().zip(batch_senders) {
+ ctx.send_message(ChainApiMessage::BlockHeader(hash, sender).into()).await;
+ }
+
+ let mut requests = futures::stream::FuturesOrdered::new();
+ batch_receivers.into_iter().map(|rx| async move {
+ match rx.await {
+ Err(_) | Ok(Err(_)) => None,
+ Ok(Ok(h)) => h,
+ }
+ })
+ .for_each(|x| requests.push(x));
+
+ let batch_headers: Vec<_> = requests
+ .flat_map(|x: Option<Header>| stream::iter(x))
+ .collect()
+ .await;
+
+ // Any failed header fetch of the batch will yield a `None` result that will
+ // be skipped. Any failure at this stage means we'll just ignore those blocks
+ // as the chain DB has failed us.
+ if batch_headers.len() != batch_hashes.len() { break }
+ batch_headers
+ };
+
+ for (hash, header) in batch_hashes.into_iter().zip(batch_headers) {
+ let is_known = db.load_block_entry(&hash)?.is_some();
+
+ let is_relevant = header.number > finalized_number;
+
+ if is_known || !is_relevant {
+ break
+ }
+
+ ancestry.push((hash, header));
+ }
+ }
+
+ ancestry.reverse();
+ Ok(ancestry)
+}
+
+async fn load_all_sessions(
+ ctx: &mut impl SubsystemContext,
+ block_hash: Hash,
+ start: SessionIndex,
+ end_inclusive: SessionIndex,
+) -> SubsystemResult<Option<Vec<SessionInfo>>> {
+ let mut v = Vec::new();
+ for i in start..=end_inclusive {
+ let (tx, rx)= oneshot::channel();
+ ctx.send_message(RuntimeApiMessage::Request(
+ block_hash,
+ RuntimeApiRequest::SessionInfo(i, tx),
+ ).into()).await;
+
+ let session_info = match rx.await {
+ Ok(Ok(Some(s))) => s,
+ Ok(Ok(None)) => return Ok(None),
+ Ok(Err(e)) => return Err(SubsystemError::with_origin("approval-voting", e)),
+ Err(e) => return Err(SubsystemError::with_origin("approval-voting", e)),
+ };
+
+ v.push(session_info);
+ }
+
+ Ok(Some(v))
+}
+
+// Sessions unavailable in state to cache.
+#[derive(Debug)]
+struct SessionsUnavailable;
+
+// When inspecting a new import notification, updates the session info cache to match
+// the session of the imported block.
+//
+// this only needs to be called on heads where we are directly notified about import, as sessions do
+// not change often and import notifications are expected to be typically increasing in session number.
+//
+// some backwards drift in session index is acceptable.
+async fn cache_session_info_for_head(
+ ctx: &mut impl SubsystemContext,
+ session_window: &mut RollingSessionWindow,
+ block_hash: Hash,
+ block_header: &Header,
+) -> SubsystemResult<Result<(), SessionsUnavailable>> {
+ let session_index = {
+ let (s_tx, s_rx) = oneshot::channel();
+
+ // The genesis is guaranteed to be at the beginning of the session and its parent state
+ // is non-existent. Therefore if we're at the genesis, we request using its state and
+ // not the parent.
+ ctx.send_message(RuntimeApiMessage::Request(
+ if block_header.number == 0 { block_hash } else { block_header.parent_hash },
+ RuntimeApiRequest::SessionIndexForChild(s_tx),
+ ).into()).await;
+
+ match s_rx.await? {
+ Ok(s) => s,
+ Err(e) => return Err(SubsystemError::with_origin("approval-voting", e)),
+ }
+ };
+
+ match session_window.earliest_session {
+ None => {
+ // First block processed on start-up.
+
+ let window_start = session_index.saturating_sub(APPROVAL_SESSIONS - 1);
+
+ tracing::info!(
+ target: LOG_TARGET, "Loading approval window from session {}..={}",
+ window_start, session_index,
+ );
+
+ match load_all_sessions(ctx, block_hash, window_start, session_index).await? {
+ None => {
+ tracing::warn!(
+ target: LOG_TARGET,
+ "Could not load sessions {}..={} from block {:?} in session {}",
+ window_start, session_index, block_hash, session_index,
+ );
+
+ return Ok(Err(SessionsUnavailable));
+ },
+ Some(s) => {
+ session_window.earliest_session = Some(window_start);
+ session_window.session_info = s;
+ }
+ }
+ }
+ Some(old_window_start) => {
+ let latest = session_window.latest_session().expect("latest always exists if earliest does; qed");
+
+ // Either cached or ancient.
+ if session_index <= latest { return Ok(Ok(())) }
+
+ let old_window_end = latest;
+
+ let window_start = session_index.saturating_sub(APPROVAL_SESSIONS - 1);
+ tracing::info!(
+ target: LOG_TARGET, "Moving approval window from session {}..={} to {}..={}",
+ old_window_start, old_window_end,
+ window_start, session_index,
+ );
+
+ // keep some of the old window, if applicable.
+ let overlap_start = window_start - old_window_start;
+
+ let fresh_start = if latest < window_start {
+ window_start
+ } else {
+ latest + 1
+ };
+
+ match load_all_sessions(ctx, block_hash, fresh_start, session_index).await? {
+ None => {
+ tracing::warn!(
+ target: LOG_TARGET,
+ "Could not load sessions {}..={} from block {:?} in session {}",
+ latest + 1, session_index, block_hash, session_index,
+ );
+
+ return Ok(Err(SessionsUnavailable));
+ }
+ Some(s) => {
+ session_window.session_info.drain(..overlap_start as usize);
+ session_window.session_info.extend(s);
+ session_window.earliest_session = Some(window_start);
+ }
+ }
+ }
+ }
+
+ Ok(Ok(()))
+}
+
+struct ImportedBlockInfo {
+ included_candidates: Vec<(CandidateHash, CandidateReceipt, CoreIndex, GroupIndex)>,
+ session_index: SessionIndex,
+ assignments: HashMap<CoreIndex, OurAssignment>,
+ n_validators: usize,
+ relay_vrf_story: RelayVRFStory,
+ slot: Slot,
+}
+
+struct ImportedBlockInfoEnv<'a> {
+ session_window: &'a RollingSessionWindow,
+ assignment_criteria: &'a (dyn AssignmentCriteria + Send + Sync),
+ keystore: &'a LocalKeystore,
+}
+
+// Computes information about the imported block. Returns `None` if the info couldn't be extracted -
+// failure to communicate with overseer,
+async fn imported_block_info(
+ ctx: &mut impl SubsystemContext,
+ env: ImportedBlockInfoEnv<'_>,
+ block_hash: Hash,
+ block_header: &Header,
+) -> SubsystemResult<Option<ImportedBlockInfo>> {
+ // Ignore any runtime API errors - that means these blocks are old and finalized.
+ // Only unfinalized blocks factor into the approval voting process.
+
+ // fetch candidates
+ let included_candidates: Vec<_> = {
+ let (c_tx, c_rx) = oneshot::channel();
+ ctx.send_message(RuntimeApiMessage::Request(
+ block_hash,
+ RuntimeApiRequest::CandidateEvents(c_tx),
+ ).into()).await;
+
+ let events: Vec<CandidateEvent> = match c_rx.await {
+ Ok(Ok(events)) => events,
+ Ok(Err(_)) => return Ok(None),
+ Err(_) => return Ok(None),
+ };
+
+ events.into_iter().filter_map(|e| match e {
+ CandidateEvent::CandidateIncluded(receipt, _, core, group)
+ => Some((receipt.hash(), receipt, core, group)),
+ _ => None,
+ }).collect()
+ };
+
+ // fetch session. ignore blocks that are too old, but unless sessions are really
+ // short, that shouldn't happen.
+ let session_index = {
+ let (s_tx, s_rx) = oneshot::channel();
+ ctx.send_message(RuntimeApiMessage::Request(
+ block_header.parent_hash,
+ RuntimeApiRequest::SessionIndexForChild(s_tx),
+ ).into()).await;
+
+ let session_index = match s_rx.await {
+ Ok(Ok(s)) => s,
+ Ok(Err(_)) => return Ok(None),
+ Err(_) => return Ok(None),
+ };
+
+ if env.session_window.earliest_session.as_ref().map_or(true, |e| &session_index < e) {
+ tracing::debug!(target: LOG_TARGET, "Block {} is from ancient session {}. Skipping",
+ block_hash, session_index);
+
+ return Ok(None);
+ }
+
+ session_index
+ };
+
+ let babe_epoch = {
+ let (s_tx, s_rx) = oneshot::channel();
+
+ // It's not obvious whether to use the hash or the parent hash for this, intuitively. We
+ // want to use the block hash itself, and here's why:
+ //
+ // First off, 'epoch' in BABE means 'session' in other places. 'epoch' is the terminology from
+ // the paper, which we fulfill using 'session's, which are a Substrate consensus concept.
+ //
+ // In BABE, the on-chain and off-chain view of the current epoch can differ at epoch boundaries
+ // because epochs change precisely at a slot. When a block triggers a new epoch, the state of
+ // its parent will still have the old epoch. Conversely, we have the invariant that every
+ // block in BABE has the epoch _it was authored in_ within its post-state. So we use the
+ // block, and not its parent.
+ //
+ // It's worth noting that Polkadot session changes, at least for the purposes of parachains,
+ // would function the same way, except for the fact that they're always delayed by one block.
+ // This gives us the opposite invariant for sessions - the parent block's post-state gives
+ // us the canonical information about the session index for any of its children, regardless
+ // of which slot number they might be produced at.
+ ctx.send_message(RuntimeApiMessage::Request(
+ block_hash,
+ RuntimeApiRequest::CurrentBabeEpoch(s_tx),
+ ).into()).await;
+
+ match s_rx.await {
+ Ok(Ok(s)) => s,
+ Ok(Err(_)) => return Ok(None),
+ Err(_) => return Ok(None),
+ }
+ };
+
+ let session_info = match env.session_window.session_info(session_index) {
+ Some(s) => s,
+ None => {
+ tracing::debug!(
+ target: LOG_TARGET,
+ "Session info unavailable for block {}",
+ block_hash,
+ );
+
+ return Ok(None);
+ }
+ };
+
+ let (assignments, slot, relay_vrf_story) = {
+ let unsafe_vrf = approval_types::babe_unsafe_vrf_info(&block_header);
+
+ match unsafe_vrf {
+ Some(unsafe_vrf) => {
+ let slot = unsafe_vrf.slot();
+
+ match unsafe_vrf.compute_randomness(
+ &babe_epoch.authorities,
+ &babe_epoch.randomness,
+ babe_epoch.epoch_index,
+ ) {
+ Ok(relay_vrf) => {
+ let assignments = env.assignment_criteria.compute_assignments(
+ &env.keystore,
+ relay_vrf.clone(),
+ &crate::criteria::Config::from(session_info),
+ included_candidates.iter()
+ .map(|(_, _, core, group)| (*core, *group))
+ .collect(),
+ );
+
+ (assignments, slot, relay_vrf)
+ },
+ Err(_) => return Ok(None),
+ }
+ }
+ None => {
+ tracing::debug!(
+ target: LOG_TARGET,
+ "BABE VRF info unavailable for block {}",
+ block_hash,
+ );
+
+ return Ok(None);
+ }
+ }
+ };
+
+ Ok(Some(ImportedBlockInfo {
+ included_candidates,
+ session_index,
+ assignments,
+ n_validators: session_info.validators.len(),
+ relay_vrf_story,
+ slot,
+ }))
+}
+
+/// Information about a block and imported candidates.
+pub struct BlockImportedCandidates {
+ pub block_hash: Hash,
+ pub block_number: BlockNumber,
+ pub block_tick: Tick,
+ pub no_show_duration: Tick,
+ pub imported_candidates: Vec<(CandidateHash, CandidateEntry)>,
+}
+
+/// Handle a new notification of a header. This will
+/// * determine all blocks to import,
+/// * extract candidate information from them
+/// * update the rolling session window
+/// * compute our assignments
+/// * import the block and candidates to the approval DB
+/// * and return information about all candidates imported under each block.
+///
+/// It is the responsibility of the caller to schedule wakeups for each block.
+pub(crate) async fn handle_new_head(
+ ctx: &mut impl SubsystemContext,
+ state: &mut State,
+ db_writer: &impl AuxStore,
+ head: Hash,
+ finalized_number: &Option<BlockNumber>,
+) -> SubsystemResult<Vec<BlockImportedCandidates>> {
+ // Update session info based on most recent head.
+
+ let header = {
+ let (h_tx, h_rx) = oneshot::channel();
+ ctx.send_message(ChainApiMessage::BlockHeader(head, h_tx).into()).await;
+
+ match h_rx.await? {
+ Err(e) => {
+ return Err(SubsystemError::with_origin("approval-voting", e));
+ }
+ Ok(None) => {
+ tracing::warn!(target: LOG_TARGET, "Missing header for new head {}", head);
+ return Ok(Vec::new());
+ }
+ Ok(Some(h)) => h
+ }
+ };
+
+ if let Err(SessionsUnavailable)
+ = cache_session_info_for_head(
+ ctx,
+ &mut state.session_window,
+ head,
+ &header,
+ ).await?
+ {
+ tracing::warn!(
+ target: LOG_TARGET,
+ "Could not cache session info when processing head {:?}",
+ head,
+ );
+
+ return Ok(Vec::new())
+ }
+
+ // If we've just started the node and haven't yet received any finality notifications,
+ // we don't do any look-back. Approval voting is only for nodes that were already online.
+ let finalized_number = finalized_number.unwrap_or(header.number.saturating_sub(1));
+
+ let new_blocks = determine_new_blocks(ctx, &state.db, head, &header, finalized_number)
+ .map_err(|e| SubsystemError::with_origin("approval-voting", e))
+ .await?;
+
+ let mut approval_meta: Vec<BlockApprovalMeta> = Vec::with_capacity(new_blocks.len());
+ let mut imported_candidates = Vec::with_capacity(new_blocks.len());
+
+ // `determine_new_blocks` gives us a vec in backwards order. we want to move forwards.
+ for (block_hash, block_header) in new_blocks.into_iter().rev() {
+ let env = ImportedBlockInfoEnv {
+ session_window: &state.session_window,
+ assignment_criteria: &*state.assignment_criteria,
+ keystore: &state.keystore,
+ };
+
+ let ImportedBlockInfo {
+ included_candidates,
+ session_index,
+ assignments,
+ n_validators,
+ relay_vrf_story,
+ slot,
+ } = match imported_block_info(ctx, env, block_hash, &block_header).await? {
+ Some(i) => i,
+ None => continue,
+ };
+
+ let candidate_entries = approval_db::v1::add_block_entry(
+ db_writer,
+ block_header.parent_hash,
+ block_header.number,
+ approval_db::v1::BlockEntry {
+ block_hash: block_hash,
+ session: session_index,
+ slot,
+ relay_vrf_story: relay_vrf_story.0,
+ candidates: included_candidates.iter()
+ .map(|(hash, _, core, _)| (*core, *hash)).collect(),
+ approved_bitfield: bitvec::bitvec![BitOrderLsb0, u8; 0; included_candidates.len()],
+ children: Vec::new(),
+ },
+ n_validators,
+ |candidate_hash| {
+ included_candidates.iter().find(|(hash, _, _, _)| candidate_hash == hash)
+ .map(|(_, receipt, core, backing_group)| approval_db::v1::NewCandidateInfo {
+ candidate: receipt.clone(),
+ backing_group: *backing_group,
+ our_assignment: assignments.get(core).map(|a| a.clone().into()),
+ })
+ }
+ ).map_err(|e| SubsystemError::with_origin("approval-voting", e))?;
+ approval_meta.push(BlockApprovalMeta {
+ hash: block_hash,
+ number: block_header.number,
+ parent_hash: block_header.parent_hash,
+ candidates: included_candidates.iter().map(|(hash, _, _, _)| *hash).collect(),
+ slot,
+ });
+
+ let (block_tick, no_show_duration) = {
+ let session_info = state.session_window.session_info(session_index)
+ .expect("imported_block_info requires session to be available; qed");
+
+ let block_tick = slot_number_to_tick(state.slot_duration_millis, slot);
+ let no_show_duration = slot_number_to_tick(
+ state.slot_duration_millis,
+ Slot::from(u64::from(session_info.no_show_slots)),
+ );
+
+ (block_tick, no_show_duration)
+ };
+
+ imported_candidates.push(
+ BlockImportedCandidates {
+ block_hash,
+ block_number: block_header.number,
+ block_tick,
+ no_show_duration,
+ imported_candidates: candidate_entries
+ .into_iter()
+ .map(|(h, e)| (h, e.into()))
+ .collect(),
+ }
+ );
+ }
+
+ ctx.send_message(ApprovalDistributionMessage::NewBlocks(approval_meta).into()).await;
+
+ Ok(imported_candidates)
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use polkadot_node_subsystem_test_helpers::make_subsystem_context;
+ use polkadot_node_primitives::approval::{VRFOutput, VRFProof};
+ use polkadot_subsystem::messages::AllMessages;
+ use sp_core::testing::TaskExecutor;
+ use sp_runtime::{Digest, DigestItem};
+ use sp_consensus_babe::Epoch as BabeEpoch;
+ use sp_consensus_babe::digests::{CompatibleDigestItem, PreDigest, SecondaryVRFPreDigest};
+ use sp_keyring::sr25519::Keyring as Sr25519Keyring;
+ use assert_matches::assert_matches;
+ use merlin::Transcript;
+
+ use crate::{criteria, BlockEntry};
+
+ #[derive(Default)]
+ struct TestDB {
+ block_entries: HashMap<Hash, BlockEntry>,
+ candidate_entries: HashMap<CandidateHash, CandidateEntry>,
+ }
+
+ impl DBReader for TestDB {
+ fn load_block_entry(
+ &self,
+ block_hash: &Hash,
+ ) -> SubsystemResult<Option<BlockEntry>> {
+ Ok(self.block_entries.get(block_hash).map(|c| c.clone()))
+ }
+
+ fn load_candidate_entry(
+ &self,
+ candidate_hash: &CandidateHash,
+ ) -> SubsystemResult<Option<CandidateEntry>> {
+ Ok(self.candidate_entries.get(candidate_hash).map(|c| c.clone()))
+ }
+ }
+
+ #[derive(Clone)]
+ struct TestChain {
+ start_number: BlockNumber,
+ headers: Vec<Header>,
+ numbers: HashMap<Hash, BlockNumber>,
+ }
+
+ impl TestChain {
+ fn new(start: BlockNumber, len: usize) -> Self {
+ assert!(len > 0, "len must be at least 1");
+
+ let base = Header {
+ digest: Default::default(),
+ extrinsics_root: Default::default(),
+ number: start,
+ state_root: Default::default(),
+ parent_hash: Default::default(),
+ };
+
+ let base_hash = base.hash();
+
+ let mut chain = TestChain {
+ start_number: start,
+ headers: vec![base],
+ numbers: vec![(base_hash, start)].into_iter().collect(),
+ };
+
+ for _ in 1..len {
+ chain.grow()
+ }
+
+ chain
+ }
+
+ fn grow(&mut self) {
+ let next = {
+ let last = self.headers.last().unwrap();
+ Header {
+ digest: Default::default(),
+ extrinsics_root: Default::default(),
+ number: last.number + 1,
+ state_root: Default::default(),
+ parent_hash: last.hash(),
+ }
+ };
+
+ self.numbers.insert(next.hash(), next.number);
+ self.headers.push(next);
+ }
+
+ fn header_by_number(&self, number: BlockNumber) -> Option<&Header> {
+ if number < self.start_number {
+ None
+ } else {
+ self.headers.get((number - self.start_number) as usize)
+ }
+ }
+
+ fn header_by_hash(&self, hash: &Hash) -> Option<&Header> {
+ self.numbers.get(hash).and_then(|n| self.header_by_number(*n))
+ }
+
+ fn hash_by_number(&self, number: BlockNumber) -> Option<Hash> {
+ self.header_by_number(number).map(|h| h.hash())
+ }
+
+ fn ancestry(&self, hash: &Hash, k: BlockNumber) -> Vec<Hash> {
+ let n = match self.numbers.get(hash) {
+ None => return Vec::new(),
+ Some(&n) => n,
+ };
+
+ (0..k)
+ .map(|i| i + 1)
+ .filter_map(|i| self.header_by_number(n - i))
+ .map(|h| h.hash())
+ .collect()
+ }
+ }
+
+ struct MockAssignmentCriteria;
+
+ impl AssignmentCriteria for MockAssignmentCriteria {
+ fn compute_assignments(
+ &self,
+ _keystore: &LocalKeystore,
+ _relay_vrf_story: polkadot_node_primitives::approval::RelayVRFStory,
+ _config: &criteria::Config,
+ _leaving_cores: Vec<(polkadot_primitives::v1::CoreIndex, polkadot_primitives::v1::GroupIndex)>,
+ ) -> HashMap<polkadot_primitives::v1::CoreIndex, criteria::OurAssignment> {
+ HashMap::new()
+ }
+
+ fn check_assignment_cert(
+ &self,
+ _claimed_core_index: polkadot_primitives::v1::CoreIndex,
+ _validator_index: polkadot_primitives::v1::ValidatorIndex,
+ _config: &criteria::Config,
+ _relay_vrf_story: polkadot_node_primitives::approval::RelayVRFStory,
+ _assignment: &polkadot_node_primitives::approval::AssignmentCert,
+ _backing_group: polkadot_primitives::v1::GroupIndex,
+ ) -> Result<polkadot_node_primitives::approval::DelayTranche, criteria::InvalidAssignment> {
+ Ok(0)
+ }
+ }
+
+ // used for generating assignments where the validity of the VRF doesn't matter.
+ fn garbage_vrf() -> (VRFOutput, VRFProof) {
+ let key = Sr25519Keyring::Alice.pair();
+ let key: &schnorrkel::Keypair = key.as_ref();
+
+ let (o, p, _) = key.vrf_sign(Transcript::new(b"test-garbage"));
+ (VRFOutput(o.to_output()), VRFProof(p))
+ }
+
+ #[test]
+ fn determine_new_blocks_back_to_finalized() {
+ let pool = TaskExecutor::new();
+ let (mut ctx, mut handle) = make_subsystem_context::<(), _>(pool.clone());
+
+ let db = TestDB::default();
+
+ let chain = TestChain::new(10, 9);
+
+ let head = chain.header_by_number(18).unwrap().clone();
+ let head_hash = head.hash();
+ let finalized_number = 12;
+
+ // Finalized block should be omitted. The head provided to `determine_new_blocks`
+ // should be included.
+ let expected_ancestry = (13..18)
+ .map(|n| chain.header_by_number(n).map(|h| (h.hash(), h.clone())).unwrap())
+ .rev()
+ .collect::<Vec<_>>();
+
+ let test_fut = Box::pin(async move {
+ let ancestry = determine_new_blocks(
+ &mut ctx,
+ &db,
+ head_hash,
+ &head,
+ finalized_number,
+ ).await.unwrap();
+
+ assert_eq!(
+ ancestry,
+ expected_ancestry,
+ );
+ });
+
+ let aux_fut = Box::pin(async move {
+ assert_matches!(
+ handle.recv().await,
+ AllMessages::ChainApi(ChainApiMessage::Ancestors {
+ hash: h,
+ k,
+ response_channel: tx,
+ }) => {
+ assert_eq!(h, head_hash);
+ assert_eq!(k, 4);
+ let _ = tx.send(Ok(chain.ancestry(&h, k as _)));
+ }
+ );
+
+ for _ in 0..4 {
+ assert_matches!(
+ handle.recv().await,
+ AllMessages::ChainApi(ChainApiMessage::BlockHeader(h, tx)) => {
+ let _ = tx.send(Ok(chain.header_by_hash(&h).map(|h| h.clone())));
+ }
+ );
+ }
+
+ assert_matches!(
+ handle.recv().await,
+ AllMessages::ChainApi(ChainApiMessage::Ancestors {
+ hash: h,
+ k,
+ response_channel: tx,
+ }) => {
+ assert_eq!(h, chain.hash_by_number(14).unwrap());
+ assert_eq!(k, 4);
+ let _ = tx.send(Ok(chain.ancestry(&h, k as _)));
+ }
+ );
+
+ for _ in 0..4 {
+ assert_matches!(
+ handle.recv().await,
+ AllMessages::ChainApi(ChainApiMessage::BlockHeader(h, tx)) => {
+ let _ = tx.send(Ok(chain.header_by_hash(&h).map(|h| h.clone())));
+ }
+ );
+ }
+
+ });
+
+ futures::executor::block_on(futures::future::select(test_fut, aux_fut));
+ }
+
+ #[test]
+ fn determine_new_blocks_back_to_known() {
+ let pool = TaskExecutor::new();
+ let (mut ctx, mut handle) = make_subsystem_context::<(), _>(pool.clone());
+
+ let mut db = TestDB::default();
+
+ let chain = TestChain::new(10, 9);
+
+ let head = chain.header_by_number(18).unwrap().clone();
+ let head_hash = head.hash();
+ let finalized_number = 12;
+ let known_number = 15;
+ let known_hash = chain.hash_by_number(known_number).unwrap();
+
+ db.block_entries.insert(
+ known_hash,
+ crate::approval_db::v1::BlockEntry {
+ block_hash: known_hash,
+ session: 1,
+ slot: Slot::from(100),
+ relay_vrf_story: Default::default(),
+ candidates: Vec::new(),
+ approved_bitfield: Default::default(),
+ children: Vec::new(),
+ }.into(),
+ );
+
+ // Known block should be omitted. The head provided to `determine_new_blocks`
+ // should be included.
+ let expected_ancestry = (16..18)
+ .map(|n| chain.header_by_number(n).map(|h| (h.hash(), h.clone())).unwrap())
+ .rev()
+ .collect::<Vec<_>>();
+
+ let test_fut = Box::pin(async move {
+ let ancestry = determine_new_blocks(
+ &mut ctx,
+ &db,
+ head_hash,
+ &head,
+ finalized_number,
+ ).await.unwrap();
+
+ assert_eq!(
+ ancestry,
+ expected_ancestry,
+ );
+ });
+
+ let aux_fut = Box::pin(async move {
+ assert_matches!(
+ handle.recv().await,
+ AllMessages::ChainApi(ChainApiMessage::Ancestors {
+ hash: h,
+ k,
+ response_channel: tx,
+ }) => {
+ assert_eq!(h, head_hash);
+ assert_eq!(k, 4);
+ let _ = tx.send(Ok(chain.ancestry(&h, k as _)));
+ }
+ );
+
+ for _ in 0u32..4 {
+ assert_matches!(
+ handle.recv().await,
+ AllMessages::ChainApi(ChainApiMessage::BlockHeader(h, tx)) => {
+ let _ = tx.send(Ok(chain.header_by_hash(&h).map(|h| h.clone())));
+ }
+ );
+ }
+ });
+
+ futures::executor::block_on(futures::future::select(test_fut, aux_fut));
+ }
+
+ #[test]
+ fn determine_new_blocks_already_known_is_empty() {
+ let pool = TaskExecutor::new();
+ let (mut ctx, _handle) = make_subsystem_context::<(), _>(pool.clone());
+
+ let mut db = TestDB::default();
+
+ let chain = TestChain::new(10, 9);
+
+ let head = chain.header_by_number(18).unwrap().clone();
+ let head_hash = head.hash();
+ let finalized_number = 0;
+
+ db.block_entries.insert(
+ head_hash,
+ crate::approval_db::v1::BlockEntry {
+ block_hash: head_hash,
+ session: 1,
+ slot: Slot::from(100),
+ relay_vrf_story: Default::default(),
+ candidates: Vec::new(),
+ approved_bitfield: Default::default(),
+ children: Vec::new(),
+ }.into(),
+ );
+
+ // Known block should be omitted.
+ let expected_ancestry = Vec::new();
+
+ let test_fut = Box::pin(async move {
+ let ancestry = determine_new_blocks(
+ &mut ctx,
+ &db,
+ head_hash,
+ &head,
+ finalized_number,
+ ).await.unwrap();
+
+ assert_eq!(
+ ancestry,
+ expected_ancestry,
+ );
+ });
+
+ futures::executor::block_on(test_fut);
+ }
+
+ #[test]
+ fn determine_new_blocks_parent_known_is_fast() {
+ let pool = TaskExecutor::new();
+ let (mut ctx, _handle) = make_subsystem_context::<(), _>(pool.clone());
+
+ let mut db = TestDB::default();
+
+ let chain = TestChain::new(10, 9);
+
+ let head = chain.header_by_number(18).unwrap().clone();
+ let head_hash = head.hash();
+ let finalized_number = 0;
+ let parent_hash = chain.hash_by_number(17).unwrap();
+
+ db.block_entries.insert(
+ parent_hash,
+ crate::approval_db::v1::BlockEntry {
+ block_hash: parent_hash,
+ session: 1,
+ slot: Slot::from(100),
+ relay_vrf_story: Default::default(),
+ candidates: Vec::new(),
+ approved_bitfield: Default::default(),
+ children: Vec::new(),
+ }.into(),
+ );
+
+ // New block should be the only new one.
+ let expected_ancestry = vec![(head_hash, head.clone())];
+
+ let test_fut = Box::pin(async move {
+ let ancestry = determine_new_blocks(
+ &mut ctx,
+ &db,
+ head_hash,
+ &head,
+ finalized_number,
+ ).await.unwrap();
+
+ assert_eq!(
+ ancestry,
+ expected_ancestry,
+ );
+ });
+
+ futures::executor::block_on(test_fut);
+ }
+
+ #[test]
+ fn determine_new_block_before_finality_is_empty() {
+ let pool = TaskExecutor::new();
+ let (mut ctx, _handle) = make_subsystem_context::<(), _>(pool.clone());
+
+ let chain = TestChain::new(10, 9);
+
+ let head = chain.header_by_number(18).unwrap().clone();
+ let head_hash = head.hash();
+ let parent_hash = chain.hash_by_number(17).unwrap();
+ let mut db = TestDB::default();
+
+ db.block_entries.insert(
+ parent_hash,
+ crate::approval_db::v1::BlockEntry {
+ block_hash: parent_hash,
+ session: 1,
+ slot: Slot::from(100),
+ relay_vrf_story: Default::default(),
+ candidates: Vec::new(),
+ approved_bitfield: Default::default(),
+ children: Vec::new(),
+ }.into(),
+ );
+
+ let test_fut = Box::pin(async move {
+ let after_finality = determine_new_blocks(
+ &mut ctx,
+ &db,
+ head_hash,
+ &head,
+ 17,
+ ).await.unwrap();
+
+ let at_finality = determine_new_blocks(
+ &mut ctx,
+ &db,
+ head_hash,
+ &head,
+ 18,
+ ).await.unwrap();
+
+ let before_finality = determine_new_blocks(
+ &mut ctx,
+ &db,
+ head_hash,
+ &head,
+ 19,
+ ).await.unwrap();
+
+ assert_eq!(
+ after_finality,
+ vec![(head_hash, head.clone())],
+ );
+
+ assert_eq!(
+ at_finality,
+ Vec::new(),
+ );
+
+ assert_eq!(
+ before_finality,
+ Vec::new(),
+ );
+ });
+
+ futures::executor::block_on(test_fut);
+ }
+
+ fn dummy_session_info(index: SessionIndex) -> SessionInfo {
+ SessionInfo {
+ validators: Vec::new(),
+ discovery_keys: Vec::new(),
+ assignment_keys: Vec::new(),
+ validator_groups: Vec::new(),
+ n_cores: index as _,
+ zeroth_delay_tranche_width: index as _,
+ relay_vrf_modulo_samples: index as _,
+ n_delay_tranches: index as _,
+ no_show_slots: index as _,
+ needed_approvals: index as _,
+ }
+ }
+
+
+ #[test]
+ fn imported_block_info_is_good() {
+ let pool = TaskExecutor::new();
+ let (mut ctx, mut handle) = make_subsystem_context::<(), _>(pool.clone());
+
+ let session = 5;
+ let session_info = dummy_session_info(session);
+
+ let slot = Slot::from(10);
+
+ let header = Header {
+ digest: {
+ let mut d = Digest::default();
+ let (vrf_output, vrf_proof) = garbage_vrf();
+ d.push(DigestItem::babe_pre_digest(PreDigest::SecondaryVRF(
+ SecondaryVRFPreDigest {
+ authority_index: 0,
+ slot,
+ vrf_output,
+ vrf_proof,
+ }
+ )));
+
+ d
+ },
+ extrinsics_root: Default::default(),
+ number: 5,
+ state_root: Default::default(),
+ parent_hash: Default::default(),
+ };
+
+ let hash = header.hash();
+ let make_candidate = |para_id| {
+ let mut r = CandidateReceipt::default();
+ r.descriptor.para_id = para_id;
+ r.descriptor.relay_parent = hash;
+ r
+ };
+ let candidates = vec![
+ (make_candidate(1.into()), CoreIndex(0), GroupIndex(2)),
+ (make_candidate(2.into()), CoreIndex(1), GroupIndex(3)),
+ ];
+
+
+ let inclusion_events = candidates.iter().cloned()
+ .map(|(r, c, g)| CandidateEvent::CandidateIncluded(r, Vec::new().into(), c, g))
+ .collect::<Vec<_>>();
+
+ let test_fut = {
+ let included_candidates = candidates.iter()
+ .map(|(r, c, g)| (r.hash(), r.clone(), *c, *g))
+ .collect::<Vec<_>>();
+
+ let session_window = {
+ let mut window = RollingSessionWindow::default();
+
+ window.earliest_session = Some(session);
+ window.session_info.push(session_info);
+
+ window
+ };
+
+ let header = header.clone();
+ Box::pin(async move {
+ let env = ImportedBlockInfoEnv {
+ session_window: &session_window,
+ assignment_criteria: &MockAssignmentCriteria,
+ keystore: &LocalKeystore::in_memory(),
+ };
+
+ let info = imported_block_info(
+ &mut ctx,
+ env,
+ hash,
+ &header,
+ ).await.unwrap().unwrap();
+
+ assert_eq!(info.included_candidates, included_candidates);
+ assert_eq!(info.session_index, session);
+ assert!(info.assignments.is_empty());
+ assert_eq!(info.n_validators, 0);
+ assert_eq!(info.slot, slot);
+ })
+ };
+
+ let aux_fut = Box::pin(async move {
+ assert_matches!(
+ handle.recv().await,
+ AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+ h,
+ RuntimeApiRequest::CandidateEvents(c_tx),
+ )) => {
+ assert_eq!(h, hash);
+ let _ = c_tx.send(Ok(inclusion_events));
+ }
+ );
+
+ assert_matches!(
+ handle.recv().await,
+ AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+ h,
+ RuntimeApiRequest::SessionIndexForChild(c_tx),
+ )) => {
+ assert_eq!(h, header.parent_hash);
+ let _ = c_tx.send(Ok(session));
+ }
+ );
+
+ assert_matches!(
+ handle.recv().await,
+ AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+ h,
+ RuntimeApiRequest::CurrentBabeEpoch(c_tx),
+ )) => {
+ assert_eq!(h, hash);
+ let _ = c_tx.send(Ok(BabeEpoch {
+ epoch_index: session as _,
+ start_slot: Slot::from(0),
+ duration: 200,
+ authorities: vec![(Sr25519Keyring::Alice.public().into(), 1)],
+ randomness: [0u8; 32],
+ }));
+ }
+ );
+ });
+
+ futures::executor::block_on(futures::future::select(test_fut, aux_fut));
+ }
+
+ #[test]
+ fn imported_block_info_fails_if_no_babe_vrf() {
+ let pool = TaskExecutor::new();
+ let (mut ctx, mut handle) = make_subsystem_context::<(), _>(pool.clone());
+
+ let session = 5;
+ let session_info = dummy_session_info(session);
+
+ let header = Header {
+ digest: Digest::default(),
+ extrinsics_root: Default::default(),
+ number: 5,
+ state_root: Default::default(),
+ parent_hash: Default::default(),
+ };
+
+ let hash = header.hash();
+ let make_candidate = |para_id| {
+ let mut r = CandidateReceipt::default();
+ r.descriptor.para_id = para_id;
+ r.descriptor.relay_parent = hash;
+ r
+ };
+ let candidates = vec![
+ (make_candidate(1.into()), CoreIndex(0), GroupIndex(2)),
+ (make_candidate(2.into()), CoreIndex(1), GroupIndex(3)),
+ ];
+
+ let inclusion_events = candidates.iter().cloned()
+ .map(|(r, c, g)| CandidateEvent::CandidateIncluded(r, Vec::new().into(), c, g))
+ .collect::<Vec<_>>();
+
+ let test_fut = {
+ let session_window = {
+ let mut window = RollingSessionWindow::default();
+
+ window.earliest_session = Some(session);
+ window.session_info.push(session_info);
+
+ window
+ };
+
+ let header = header.clone();
+ Box::pin(async move {
+ let env = ImportedBlockInfoEnv {
+ session_window: &session_window,
+ assignment_criteria: &MockAssignmentCriteria,
+ keystore: &LocalKeystore::in_memory(),
+ };
+
+ let info = imported_block_info(
+ &mut ctx,
+ env,
+ hash,
+ &header,
+ ).await.unwrap();
+
+ assert!(info.is_none());
+ })
+ };
+
+ let aux_fut = Box::pin(async move {
+ assert_matches!(
+ handle.recv().await,
+ AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+ h,
+ RuntimeApiRequest::CandidateEvents(c_tx),
+ )) => {
+ assert_eq!(h, hash);
+ let _ = c_tx.send(Ok(inclusion_events));
+ }
+ );
+
+ assert_matches!(
+ handle.recv().await,
+ AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+ h,
+ RuntimeApiRequest::SessionIndexForChild(c_tx),
+ )) => {
+ assert_eq!(h, header.parent_hash);
+ let _ = c_tx.send(Ok(session));
+ }
+ );
+
+ assert_matches!(
+ handle.recv().await,
+ AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+ h,
+ RuntimeApiRequest::CurrentBabeEpoch(c_tx),
+ )) => {
+ assert_eq!(h, hash);
+ let _ = c_tx.send(Ok(BabeEpoch {
+ epoch_index: session as _,
+ start_slot: Slot::from(0),
+ duration: 200,
+ authorities: vec![(Sr25519Keyring::Alice.public().into(), 1)],
+ randomness: [0u8; 32],
+ }));
+ }
+ );
+ });
+
+ futures::executor::block_on(futures::future::select(test_fut, aux_fut));
+ }
+
+ #[test]
+ fn imported_block_info_fails_if_unknown_session() {
+ let pool = TaskExecutor::new();
+ let (mut ctx, mut handle) = make_subsystem_context::<(), _>(pool.clone());
+
+ let session = 5;
+
+ let header = Header {
+ digest: Digest::default(),
+ extrinsics_root: Default::default(),
+ number: 5,
+ state_root: Default::default(),
+ parent_hash: Default::default(),
+ };
+
+ let hash = header.hash();
+ let make_candidate = |para_id| {
+ let mut r = CandidateReceipt::default();
+ r.descriptor.para_id = para_id;
+ r.descriptor.relay_parent = hash;
+ r
+ };
+ let candidates = vec![
+ (make_candidate(1.into()), CoreIndex(0), GroupIndex(2)),
+ (make_candidate(2.into()), CoreIndex(1), GroupIndex(3)),
+ ];
+
+ let inclusion_events = candidates.iter().cloned()
+ .map(|(r, c, g)| CandidateEvent::CandidateIncluded(r, Vec::new().into(), c, g))
+ .collect::<Vec<_>>();
+
+ let test_fut = {
+ let session_window = RollingSessionWindow::default();
+
+ let header = header.clone();
+ Box::pin(async move {
+ let env = ImportedBlockInfoEnv {
+ session_window: &session_window,
+ assignment_criteria: &MockAssignmentCriteria,
+ keystore: &LocalKeystore::in_memory(),
+ };
+
+ let info = imported_block_info(
+ &mut ctx,
+ env,
+ hash,
+ &header,
+ ).await.unwrap();
+
+ assert!(info.is_none());
+ })
+ };
+
+ let aux_fut = Box::pin(async move {
+ assert_matches!(
+ handle.recv().await,
+ AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+ h,
+ RuntimeApiRequest::CandidateEvents(c_tx),
+ )) => {
+ assert_eq!(h, hash);
+ let _ = c_tx.send(Ok(inclusion_events));
+ }
+ );
+
+ assert_matches!(
+ handle.recv().await,
+ AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+ h,
+ RuntimeApiRequest::SessionIndexForChild(c_tx),
+ )) => {
+ assert_eq!(h, header.parent_hash);
+ let _ = c_tx.send(Ok(session));
+ }
+ );
+ });
+
+ futures::executor::block_on(futures::future::select(test_fut, aux_fut));
+ }
+
+ fn cache_session_info_test(
+ session: SessionIndex,
+ mut window: RollingSessionWindow,
+ expect_requests_from: SessionIndex,
+ ) {
+ let start_session = session.saturating_sub(APPROVAL_SESSIONS - 1);
+
+ let header = Header {
+ digest: Digest::default(),
+ extrinsics_root: Default::default(),
+ number: 5,
+ state_root: Default::default(),
+ parent_hash: Default::default(),
+ };
+
+ let pool = TaskExecutor::new();
+ let (mut ctx, mut handle) = make_subsystem_context::<(), _>(pool.clone());
+
+ let hash = header.hash();
+
+ let test_fut = {
+ let header = header.clone();
+ Box::pin(async move {
+ cache_session_info_for_head(
+ &mut ctx,
+ &mut window,
+ hash,
+ &header,
+ ).await.unwrap().unwrap();
+
+ assert_eq!(window.earliest_session, Some(0));
+ assert_eq!(
+ window.session_info,
+ (start_session..=session).map(dummy_session_info).collect::<Vec<_>>(),
+ );
+ })
+ };
+
+ let aux_fut = Box::pin(async move {
+ assert_matches!(
+ handle.recv().await,
+ AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+ h,
+ RuntimeApiRequest::SessionIndexForChild(s_tx),
+ )) => {
+ assert_eq!(h, header.parent_hash);
+ let _ = s_tx.send(Ok(session));
+ }
+ );
+
+ for i in expect_requests_from..=session {
+ assert_matches!(
+ handle.recv().await,
+ AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+ h,
+ RuntimeApiRequest::SessionInfo(j, s_tx),
+ )) => {
+ assert_eq!(h, hash);
+ assert_eq!(i, j);
+ let _ = s_tx.send(Ok(Some(dummy_session_info(i))));
+ }
+ );
+ }
+ });
+
+ futures::executor::block_on(futures::future::select(test_fut, aux_fut));
+ }
+
+ #[test]
+ fn cache_session_info_first_early() {
+ cache_session_info_test(
+ 1,
+ RollingSessionWindow::default(),
+ 0,
+ );
+ }
+
+ #[test]
+ fn cache_session_info_first_late() {
+ cache_session_info_test(
+ 100,
+ RollingSessionWindow::default(),
+ (100 as SessionIndex).saturating_sub(APPROVAL_SESSIONS - 1),
+ );
+ }
+
+ #[test]
+ fn cache_session_info_jump() {
+ let window = RollingSessionWindow {
+ earliest_session: Some(50),
+ session_info: vec![dummy_session_info(50), dummy_session_info(51), dummy_session_info(52)],
+ };
+
+ cache_session_info_test(
+ 100,
+ window,
+ (100 as SessionIndex).saturating_sub(APPROVAL_SESSIONS - 1),
+ );
+ }
+
+ #[test]
+ fn cache_session_info_roll_full() {
+ let start = 99 - (APPROVAL_SESSIONS - 1);
+ let window = RollingSessionWindow {
+ earliest_session: Some(start),
+ session_info: (start..=99).map(dummy_session_info).collect(),
+ };
+
+ cache_session_info_test(
+ 100,
+ window,
+ 100, // should only make one request.
+ );
+ }
+
+ #[test]
+ fn cache_session_info_roll_many_full() {
+ let start = 97 - (APPROVAL_SESSIONS - 1);
+ let window = RollingSessionWindow {
+ earliest_session: Some(start),
+ session_info: (start..=97).map(dummy_session_info).collect(),
+ };
+
+ cache_session_info_test(
+ 100,
+ window,
+ 98,
+ );
+ }
+
+ #[test]
+ fn cache_session_info_roll_early() {
+ let start = 0;
+ let window = RollingSessionWindow {
+ earliest_session: Some(start),
+ session_info: (0..=1).map(dummy_session_info).collect(),
+ };
+
+ cache_session_info_test(
+ 2,
+ window,
+ 2, // should only make one request.
+ );
+ }
+
+ #[test]
+ fn cache_session_info_roll_many_early() {
+ let start = 0;
+ let window = RollingSessionWindow {
+ earliest_session: Some(start),
+ session_info: (0..=1).map(dummy_session_info).collect(),
+ };
+
+ cache_session_info_test(
+ 3,
+ window,
+ 2,
+ );
+ }
+
+ #[test]
+ fn any_session_unavailable_for_caching_means_no_change() {
+ let session: SessionIndex = 6;
+ let start_session = session.saturating_sub(APPROVAL_SESSIONS - 1);
+
+ let header = Header {
+ digest: Digest::default(),
+ extrinsics_root: Default::default(),
+ number: 5,
+ state_root: Default::default(),
+ parent_hash: Default::default(),
+ };
+
+ let pool = TaskExecutor::new();
+ let (mut ctx, mut handle) = make_subsystem_context::<(), _>(pool.clone());
+
+ let mut window = RollingSessionWindow::default();
+ let hash = header.hash();
+
+ let test_fut = {
+ let header = header.clone();
+ Box::pin(async move {
+ let res = cache_session_info_for_head(
+ &mut ctx,
+ &mut window,
+ hash,
+ &header,
+ ).await.unwrap();
+
+ assert_matches!(res, Err(SessionsUnavailable));
+ })
+ };
+
+ let aux_fut = Box::pin(async move {
+ assert_matches!(
+ handle.recv().await,
+ AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+ h,
+ RuntimeApiRequest::SessionIndexForChild(s_tx),
+ )) => {
+ assert_eq!(h, header.parent_hash);
+ let _ = s_tx.send(Ok(session));
+ }
+ );
+
+ for i in start_session..=session {
+ assert_matches!(
+ handle.recv().await,
+ AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+ h,
+ RuntimeApiRequest::SessionInfo(j, s_tx),
+ )) => {
+ assert_eq!(h, hash);
+ assert_eq!(i, j);
+
+ let _ = s_tx.send(Ok(if i == session {
+ None
+ } else {
+ Some(dummy_session_info(i))
+ }));
+ }
+ );
+ }
+ });
+
+ futures::executor::block_on(futures::future::select(test_fut, aux_fut));
+ }
+
+ #[test]
+ fn request_session_info_for_genesis() {
+ let session: SessionIndex = 0;
+
+ let header = Header {
+ digest: Digest::default(),
+ extrinsics_root: Default::default(),
+ number: 0,
+ state_root: Default::default(),
+ parent_hash: Default::default(),
+ };
+
+ let pool = TaskExecutor::new();
+ let (mut ctx, mut handle) = make_subsystem_context::<(), _>(pool.clone());
+
+ let mut window = RollingSessionWindow::default();
+ let hash = header.hash();
+
+ let test_fut = {
+ let header = header.clone();
+ Box::pin(async move {
+ cache_session_info_for_head(
+ &mut ctx,
+ &mut window,
+ hash,
+ &header,
+ ).await.unwrap().unwrap();
+
+ assert_eq!(window.earliest_session, Some(session));
+ assert_eq!(
+ window.session_info,
+ vec![dummy_session_info(session)],
+ );
+ })
+ };
+
+ let aux_fut = Box::pin(async move {
+ assert_matches!(
+ handle.recv().await,
+ AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+ h,
+ RuntimeApiRequest::SessionIndexForChild(s_tx),
+ )) => {
+ assert_eq!(h, hash);
+ let _ = s_tx.send(Ok(session));
+ }
+ );
+
+ assert_matches!(
+ handle.recv().await,
+ AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+ h,
+ RuntimeApiRequest::SessionInfo(s, s_tx),
+ )) => {
+ assert_eq!(h, hash);
+ assert_eq!(s, session);
+
+ let _ = s_tx.send(Ok(Some(dummy_session_info(s))));
+ }
+ );
+ });
+
+ futures::executor::block_on(futures::future::select(test_fut, aux_fut));
+ }
+}
diff --git a/node/core/approval-voting/src/lib.rs b/node/core/approval-voting/src/lib.rs
index 270afee617a7..16d9c6810d27 100644
--- a/node/core/approval-voting/src/lib.rs
+++ b/node/core/approval-voting/src/lib.rs
@@ -21,7 +21,1334 @@
//! of others. It uses this information to determine when candidates and blocks have
//! been sufficiently approved to finalize.
-mod aux_schema;
+use polkadot_subsystem::{
+ messages::{
+ AssignmentCheckResult, ApprovalCheckResult, ApprovalVotingMessage,
+ RuntimeApiMessage, RuntimeApiRequest, ChainApiMessage, ApprovalDistributionMessage,
+ ValidationFailed, CandidateValidationMessage, AvailabilityRecoveryMessage,
+ },
+ errors::RecoveryError,
+ Subsystem, SubsystemContext, SubsystemError, SubsystemResult, SpawnedSubsystem,
+ FromOverseer, OverseerSignal,
+};
+use polkadot_primitives::v1::{
+ ValidatorIndex, Hash, SessionIndex, SessionInfo, CandidateHash,
+ CandidateReceipt, BlockNumber, PersistedValidationData,
+ ValidationCode, CandidateDescriptor, PoV, ValidatorPair, ValidatorSignature, ValidatorId,
+ CandidateIndex,
+};
+use polkadot_node_primitives::ValidationResult;
+use polkadot_node_primitives::approval::{
+ IndirectAssignmentCert, IndirectSignedApprovalVote, ApprovalVote, DelayTranche,
+};
+use parity_scale_codec::Encode;
+use sc_keystore::LocalKeystore;
+use sp_consensus_slots::Slot;
+use sc_client_api::backend::AuxStore;
+use sp_runtime::traits::AppVerify;
+use sp_application_crypto::Pair;
-/// A base unit of time, starting from the unix epoch, split into half-second intervals.
-type Tick = u64;
+use futures::prelude::*;
+use futures::channel::{mpsc, oneshot};
+
+use std::collections::{BTreeMap, HashMap};
+use std::collections::btree_map::Entry;
+use std::sync::Arc;
+use std::ops::{RangeBounds, Bound as RangeBound};
+
+use approval_checking::RequiredTranches;
+use persisted_entries::{ApprovalEntry, CandidateEntry, BlockEntry};
+use criteria::{AssignmentCriteria, RealAssignmentCriteria};
+use time::{slot_number_to_tick, Tick, Clock, ClockExt, SystemClock};
+
+mod approval_checking;
+mod approval_db;
+mod criteria;
+mod import;
+mod time;
+mod persisted_entries;
+
+#[cfg(test)]
+mod tests;
+
+const APPROVAL_SESSIONS: SessionIndex = 6;
+const LOG_TARGET: &str = "approval_voting";
+
+/// The approval voting subsystem.
+pub struct ApprovalVotingSubsystem<T> {
+ keystore: LocalKeystore,
+ slot_duration_millis: u64,
+ db: Arc<T>,
+}
+
+impl<T, C> Subsystem<C> for ApprovalVotingSubsystem<T>
+ where T: AuxStore + Send + Sync + 'static, C: SubsystemContext<Message = ApprovalVotingMessage> {
+ fn start(self, ctx: C) -> SpawnedSubsystem {
+ let future = run::<T, C>(
+ ctx,
+ self,
+ Box::new(SystemClock),
+ Box::new(RealAssignmentCriteria),
+ )
+ .map_err(|e| SubsystemError::with_origin("approval-voting", e))
+ .boxed();
+
+ SpawnedSubsystem {
+ name: "approval-voting-subsystem",
+ future,
+ }
+ }
+}
+
+enum BackgroundRequest {
+ ApprovalVote(ApprovalVoteRequest),
+ CandidateValidation(
+ PersistedValidationData,
+ ValidationCode,
+ CandidateDescriptor,
+ Arc<PoV>,
+ oneshot::Sender<Result<ValidationResult, ValidationFailed>>,
+ ),
+}
+
+struct ApprovalVoteRequest {
+ validator_index: ValidatorIndex,
+ block_hash: Hash,
+ candidate_index: usize,
+}
+
+#[derive(Default)]
+struct Wakeups {
+ // Tick -> [(Relay Block, Candidate Hash)]
+ wakeups: BTreeMap<Tick, Vec<(Hash, CandidateHash)>>,
+ reverse_wakeups: HashMap<(Hash, CandidateHash), Tick>,
+}
+
+impl Wakeups {
+ // Returns the first tick there exist wakeups for, if any.
+ fn first(&self) -> Option<Tick> {
+ self.wakeups.keys().next().map(|t| *t)
+ }
+
+ // Schedules a wakeup at the given tick. no-op if there is already an earlier or equal wake-up
+ // for these values. replaces any later wakeup.
+ fn schedule(&mut self, block_hash: Hash, candidate_hash: CandidateHash, tick: Tick) {
+ if let Some(prev) = self.reverse_wakeups.get(&(block_hash, candidate_hash)) {
+ if prev <= &tick { return }
+
+ // we are replacing previous wakeup with an earlier one.
+ if let Entry::Occupied(mut entry) = self.wakeups.entry(*prev) {
+ if let Some(pos) = entry.get().iter()
+ .position(|x| x == &(block_hash, candidate_hash))
+ {
+ entry.get_mut().remove(pos);
+ }
+
+ if entry.get().is_empty() {
+ let _ = entry.remove_entry();
+ }
+ }
+ }
+
+ self.reverse_wakeups.insert((block_hash, candidate_hash), tick);
+ self.wakeups.entry(tick).or_default().push((block_hash, candidate_hash));
+ }
+
+ // drains all wakeups within the given range.
+ // panics if the given range is empty.
+ //
+ // only looks at the end bound of the range.
+ fn drain<'a, R: RangeBounds<Tick>>(&'a mut self, range: R)
+ -> impl Iterator<Item = (Hash, CandidateHash)> + 'a
+ {
+ let reverse = &mut self.reverse_wakeups;
+
+ // BTreeMap has no `drain` method :(
+ let after = match range.end_bound() {
+ RangeBound::Unbounded => BTreeMap::new(),
+ RangeBound::Included(last) => self.wakeups.split_off(&(last + 1)),
+ RangeBound::Excluded(last) => self.wakeups.split_off(&last),
+ };
+ let prev = std::mem::replace(&mut self.wakeups, after);
+ prev.into_iter()
+ .flat_map(|(_, wakeup)| wakeup)
+ .inspect(move |&(ref b, ref c)| { let _ = reverse.remove(&(*b, *c)); })
+ }
+}
+
+/// A read-only handle to a database.
+trait DBReader {
+ fn load_block_entry(
+ &self,
+ block_hash: &Hash,
+ ) -> SubsystemResult<Option<BlockEntry>>;
+
+ fn load_candidate_entry(
+ &self,
+ candidate_hash: &CandidateHash,
+ ) -> SubsystemResult<Option<CandidateEntry>>;
+}
+
+// This is a submodule to enforce opacity of the inner DB type.
+mod approval_db_v1_reader {
+ use super::{
+ DBReader, AuxStore, Hash, CandidateHash, BlockEntry, CandidateEntry,
+ Arc, SubsystemResult, SubsystemError, approval_db,
+ };
+
+ /// A DB reader that uses the approval-db V1 under the hood.
+ pub(super) struct ApprovalDBV1Reader<T: AuxStore>(Arc<T>);
+
+ impl<T: AuxStore> From<Arc<T>> for ApprovalDBV1Reader<T> {
+ fn from(a: Arc<T>) -> Self {
+ ApprovalDBV1Reader(a)
+ }
+ }
+
+ impl<T: AuxStore> DBReader for ApprovalDBV1Reader<T> {
+ fn load_block_entry(
+ &self,
+ block_hash: &Hash,
+ ) -> SubsystemResult<Option<BlockEntry>> {
+ approval_db::v1::load_block_entry(&*self.0, block_hash)
+ .map(|e| e.map(Into::into))
+ .map_err(|e| SubsystemError::with_origin("approval-voting", e))
+ }
+
+ fn load_candidate_entry(
+ &self,
+ candidate_hash: &CandidateHash,
+ ) -> SubsystemResult<Option<CandidateEntry>> {
+ approval_db::v1::load_candidate_entry(&*self.0, candidate_hash)
+ .map(|e| e.map(Into::into))
+ .map_err(|e| SubsystemError::with_origin("approval-voting", e))
+ }
+ }
+}
+use approval_db_v1_reader::ApprovalDBV1Reader;
+
+struct State<T> {
+ session_window: import::RollingSessionWindow,
+ keystore: LocalKeystore,
+ slot_duration_millis: u64,
+ db: T,
+ clock: Box<dyn Clock + Send + Sync>,
+ assignment_criteria: Box<dyn AssignmentCriteria + Send + Sync>,
+}
+
+impl<T> State<T> {
+ fn session_info(&self, i: SessionIndex) -> Option<&SessionInfo> {
+ self.session_window.session_info(i)
+ }
+}
+
+#[derive(Debug)]
+enum Action {
+ ScheduleWakeup {
+ block_hash: Hash,
+ candidate_hash: CandidateHash,
+ tick: Tick,
+ },
+ WriteBlockEntry(BlockEntry),
+ WriteCandidateEntry(CandidateHash, CandidateEntry),
+ LaunchApproval {
+ indirect_cert: IndirectAssignmentCert,
+ candidate_index: CandidateIndex,
+ session: SessionIndex,
+ candidate: CandidateReceipt,
+ },
+ Conclude,
+}
+
+async fn run<T, C>(
+ mut ctx: C,
+ subsystem: ApprovalVotingSubsystem<T>,
+ clock: Box<dyn Clock + Send + Sync>,
+ assignment_criteria: Box<dyn AssignmentCriteria + Send + Sync>,
+) -> SubsystemResult<()>
+ where T: AuxStore + Send + Sync + 'static, C: SubsystemContext<Message = ApprovalVotingMessage>
+{
+ let (background_tx, background_rx) = mpsc::channel::<BackgroundRequest>(64);
+ let mut state = State {
+ session_window: Default::default(),
+ keystore: subsystem.keystore,
+ slot_duration_millis: subsystem.slot_duration_millis,
+ db: ApprovalDBV1Reader::from(subsystem.db.clone()),
+ clock,
+ assignment_criteria,
+ };
+
+ let mut wakeups = Wakeups::default();
+
+ let mut last_finalized_height: Option<BlockNumber> = None;
+ let mut background_rx = background_rx.fuse();
+
+ let db_writer = &*subsystem.db;
+
+ if let Err(e) = approval_db::v1::clear(db_writer) {
+ tracing::warn!(target: LOG_TARGET, "Failed to clear DB: {:?}", e);
+ return Err(SubsystemError::with_origin("db", e));
+ }
+
+ loop {
+ let wait_til_next_tick = match wakeups.first() {
+ None => future::Either::Left(future::pending()),
+ Some(tick) => future::Either::Right(
+ state.clock.wait(tick).map(move |()| tick)
+ ),
+ };
+ futures::pin_mut!(wait_til_next_tick);
+
+ let actions = futures::select! {
+ tick_wakeup = wait_til_next_tick.fuse() => {
+ let woken = wakeups.drain(..=tick_wakeup).collect::<Vec<_>>();
+
+ let mut actions = Vec::new();
+ for (woken_block, woken_candidate) in woken {
+ actions.extend(process_wakeup(
+ &mut state,
+ woken_block,
+ woken_candidate,
+ )?);
+ }
+
+ actions
+ }
+ next_msg = ctx.recv().fuse() => {
+ handle_from_overseer(
+ &mut ctx,
+ &mut state,
+ db_writer,
+ next_msg?,
+ &mut last_finalized_height,
+ ).await?
+ }
+ background_request = background_rx.next().fuse() => {
+ if let Some(req) = background_request {
+ handle_background_request(
+ &mut ctx,
+ &mut state,
+ req,
+ ).await?
+ } else {
+ Vec::new()
+ }
+ }
+ };
+
+ if handle_actions(
+ &mut ctx,
+ &mut wakeups,
+ db_writer,
+ &background_tx,
+ actions,
+ ).await? {
+ break;
+ }
+ }
+
+ Ok(())
+}
+
+// returns `true` if any of the actions was a `Conclude` command.
+async fn handle_actions(
+ ctx: &mut impl SubsystemContext,
+ wakeups: &mut Wakeups,
+ db: &impl AuxStore,
+ background_tx: &mpsc::Sender<BackgroundRequest>,
+ actions: impl IntoIterator<Item = Action>,
+) -> SubsystemResult<bool> {
+ let mut transaction = approval_db::v1::Transaction::default();
+ let mut conclude = false;
+
+ for action in actions {
+ match action {
+ Action::ScheduleWakeup {
+ block_hash,
+ candidate_hash,
+ tick,
+ } => wakeups.schedule(block_hash, candidate_hash, tick),
+ Action::WriteBlockEntry(block_entry) => {
+ transaction.put_block_entry(block_entry.into());
+ }
+ Action::WriteCandidateEntry(candidate_hash, candidate_entry) => {
+ transaction.put_candidate_entry(candidate_hash, candidate_entry.into());
+ }
+ Action::LaunchApproval {
+ indirect_cert,
+ candidate_index,
+ session,
+ candidate,
+ } => {
+ let block_hash = indirect_cert.block_hash;
+ let validator_index = indirect_cert.validator;
+
+ ctx.send_message(ApprovalDistributionMessage::DistributeAssignment(
+ indirect_cert,
+ candidate_index,
+ ).into()).await;
+
+ launch_approval(
+ ctx,
+ background_tx.clone(),
+ session,
+ &candidate,
+ validator_index,
+ block_hash,
+ candidate_index as _,
+ ).await?
+ }
+ Action::Conclude => { conclude = true; }
+ }
+ }
+
+ transaction.write(db)
+ .map_err(|e| SubsystemError::with_origin("approval-voting", e))?;
+
+ Ok(conclude)
+}
+
+// Handle an incoming signal from the overseer. Returns true if execution should conclude.
+async fn handle_from_overseer(
+ ctx: &mut impl SubsystemContext,
+ state: &mut State<impl DBReader>,
+ db_writer: &impl AuxStore,
+ x: FromOverseer<ApprovalVotingMessage>,
+ last_finalized_height: &mut Option<BlockNumber>,
+) -> SubsystemResult<Vec<Action>> {
+
+ let actions = match x {
+ FromOverseer::Signal(OverseerSignal::ActiveLeaves(update)) => {
+ let mut actions = Vec::new();
+
+ for (head, _span) in update.activated {
+ match import::handle_new_head(
+ ctx,
+ state,
+ db_writer,
+ head,
+ &*last_finalized_height,
+ ).await {
+ Err(e) => return Err(SubsystemError::with_origin("db", e)),
+ Ok(block_imported_candidates) => {
+ // Schedule wakeups for all imported candidates.
+ for block_batch in block_imported_candidates {
+ for (c_hash, c_entry) in block_batch.imported_candidates {
+ let our_tranche = c_entry
+ .approval_entry(&block_batch.block_hash)
+ .and_then(|a| a.our_assignment().map(|a| a.tranche()));
+
+ if let Some(our_tranche) = our_tranche {
+ // Our first wakeup will just be the tranche of our assignment,
+ // if any. This will likely be superseded by incoming assignments
+ // and approvals which trigger rescheduling.
+ actions.push(Action::ScheduleWakeup {
+ block_hash: block_batch.block_hash,
+ candidate_hash: c_hash,
+ tick: our_tranche as Tick + block_batch.block_tick,
+ });
+ }
+ }
+ }
+ }
+ }
+ }
+
+ actions
+ }
+ FromOverseer::Signal(OverseerSignal::BlockFinalized(block_hash, block_number)) => {
+ *last_finalized_height = Some(block_number);
+
+ approval_db::v1::canonicalize(db_writer, block_number, block_hash)
+ .map_err(|e| SubsystemError::with_origin("db", e))?;
+
+ Vec::new()
+ }
+ FromOverseer::Signal(OverseerSignal::Conclude) => {
+ vec![Action::Conclude]
+ }
+ FromOverseer::Communication { msg } => match msg {
+ ApprovalVotingMessage::CheckAndImportAssignment(a, claimed_core, res) => {
+ let (check_outcome, actions) = check_and_import_assignment(state, a, claimed_core)?;
+ let _ = res.send(check_outcome);
+ actions
+ }
+ ApprovalVotingMessage::CheckAndImportApproval(a, res) => {
+ check_and_import_approval(state, a, |r| { let _ = res.send(r); })?.0
+ }
+ ApprovalVotingMessage::ApprovedAncestor(target, lower_bound, res ) => {
+ match handle_approved_ancestor(ctx, &state.db, target, lower_bound).await {
+ Ok(v) => {
+ let _ = res.send(v);
+ }
+ Err(e) => {
+ let _ = res.send(None);
+ return Err(e);
+ }
+ }
+
+ Vec::new()
+ }
+ }
+ };
+
+ Ok(actions)
+}
+
+async fn handle_background_request(
+ ctx: &mut impl SubsystemContext,
+ state: &State<impl DBReader>,
+ request: BackgroundRequest,
+) -> SubsystemResult<Vec<Action>> {
+ match request {
+ BackgroundRequest::ApprovalVote(vote_request) => {
+ issue_approval(ctx, state, vote_request).await
+ }
+ BackgroundRequest::CandidateValidation(
+ validation_data,
+ validation_code,
+ descriptor,
+ pov,
+ tx,
+ ) => {
+ ctx.send_message(CandidateValidationMessage::ValidateFromExhaustive(
+ validation_data,
+ validation_code,
+ descriptor,
+ pov,
+ tx,
+ ).into()).await;
+
+ Ok(Vec::new())
+ }
+ }
+}
+
+async fn handle_approved_ancestor(
+ ctx: &mut impl SubsystemContext,
+ db: &impl DBReader,
+ target: Hash,
+ lower_bound: BlockNumber,
+) -> SubsystemResult<Option<Hash>> {
+ let mut all_approved_max = None;
+
+ let block_number = {
+ let (tx, rx) = oneshot::channel();
+
+ ctx.send_message(ChainApiMessage::BlockNumber(target, tx).into()).await;
+
+ match rx.await? {
+ Ok(Some(n)) => n,
+ Ok(None) => return Ok(None),
+ Err(_) => return Ok(None),
+ }
+ };
+
+ if block_number <= lower_bound { return Ok(None) }
+
+ // request ancestors up to but not including the lower bound,
+ // as a vote on the lower bound is implied if we cannot find
+ // anything else.
+ let ancestry = if block_number > lower_bound + 1 {
+ let (tx, rx) = oneshot::channel();
+
+ ctx.send_message(ChainApiMessage::Ancestors {
+ hash: target,
+ k: (block_number - (lower_bound + 1)) as usize,
+ response_channel: tx,
+ }.into()).await;
+
+ match rx.await? {
+ Ok(a) => a,
+ Err(_) => return Ok(None),
+ }
+ } else {
+ Vec::new()
+ };
+
+ for block_hash in std::iter::once(target).chain(ancestry) {
+ // Block entries should be present as the assumption is that
+ // nothing here is finalized. If we encounter any missing block
+ // entries we can fail.
+ let entry = match db.load_block_entry(&block_hash)? {
+ None => return Ok(None),
+ Some(b) => b,
+ };
+
+ if entry.is_fully_approved() {
+ if all_approved_max.is_none() {
+ all_approved_max = Some(block_hash);
+ }
+ } else {
+ all_approved_max = None;
+ }
+ }
+
+ Ok(all_approved_max)
+}
+
+fn approval_signing_payload(
+ approval_vote: ApprovalVote,
+ session_index: SessionIndex,
+) -> Vec<u8> {
+ (approval_vote, session_index).encode()
+}
+
+// `Option::cmp` treats `None` as less than `Some`.
+fn min_prefer_some<T: std::cmp::Ord>(
+ a: Option<T>,
+ b: Option<T>,
+) -> Option<T> {
+ match (a, b) {
+ (None, None) => None,
+ (None, Some(x)) | (Some(x), None) => Some(x),
+ (Some(x), Some(y)) => Some(std::cmp::min(x, y)),
+ }
+}
+
+fn schedule_wakeup_action(
+ approval_entry: &ApprovalEntry,
+ block_hash: Hash,
+ candidate_hash: CandidateHash,
+ block_tick: Tick,
+ required_tranches: RequiredTranches,
+) -> Option<Action> {
+ if approval_entry.is_approved() {
+ return None
+ }
+
+ match required_tranches {
+ RequiredTranches::All => None,
+ RequiredTranches::Exact { next_no_show, .. } => next_no_show.map(|tick| Action::ScheduleWakeup {
+ block_hash,
+ candidate_hash,
+ tick,
+ }),
+ RequiredTranches::Pending { considered, next_no_show, clock_drift, .. } => {
+ // select the minimum of `next_no_show`, or the tick of the next non-empty tranche
+ // after `considered`, including any tranche that might contain our own untriggered
+ // assignment.
+ let next_non_empty_tranche = {
+ let next_announced = approval_entry.tranches().iter()
+ .skip_while(|t| t.tranche() <= considered)
+ .map(|t| t.tranche())
+ .next();
+
+ let our_untriggered = approval_entry
+ .our_assignment()
+ .and_then(|t| if !t.triggered() && t.tranche() > considered {
+ Some(t.tranche())
+ } else {
+ None
+ });
+
+ // Apply the clock drift to these tranches.
+ min_prefer_some(next_announced, our_untriggered)
+ .map(|t| t as Tick + block_tick + clock_drift)
+ };
+
+ min_prefer_some(next_non_empty_tranche, next_no_show)
+ .map(|tick| Action::ScheduleWakeup { block_hash, candidate_hash, tick })
+ }
+ }
+}
+
+fn check_and_import_assignment(
+ state: &State<impl DBReader>,
+ assignment: IndirectAssignmentCert,
+ candidate_index: CandidateIndex,
+) -> SubsystemResult<(AssignmentCheckResult, Vec<Action>)> {
+ const TICK_TOO_FAR_IN_FUTURE: Tick = 20; // 10 seconds.
+
+ let tick_now = state.clock.tick_now();
+ let block_entry = match state.db.load_block_entry(&assignment.block_hash)? {
+ Some(b) => b,
+ None => return Ok((AssignmentCheckResult::Bad, Vec::new())),
+ };
+
+ let session_info = match state.session_info(block_entry.session()) {
+ Some(s) => s,
+ None => {
+ tracing::warn!(target: LOG_TARGET, "Unknown session info for {}", block_entry.session());
+ return Ok((AssignmentCheckResult::Bad, Vec::new()));
+ }
+ };
+
+ let (claimed_core_index, assigned_candidate_hash)
+ = match block_entry.candidate(candidate_index as usize)
+ {
+ Some((c, h)) => (*c, *h),
+ None => return Ok((AssignmentCheckResult::Bad, Vec::new())), // no candidate at core.
+ };
+
+ let mut candidate_entry = match state.db.load_candidate_entry(&assigned_candidate_hash)? {
+ Some(c) => c,
+ None => {
+ tracing::warn!(
+ target: LOG_TARGET,
+ "Missing candidate entry {} referenced in live block {}",
+ assigned_candidate_hash,
+ assignment.block_hash,
+ );
+
+ return Ok((AssignmentCheckResult::Bad, Vec::new()));
+ }
+ };
+
+ let res = {
+ // import the assignment.
+ let approval_entry = match
+ candidate_entry.approval_entry_mut(&assignment.block_hash)
+ {
+ Some(a) => a,
+ None => return Ok((AssignmentCheckResult::Bad, Vec::new())),
+ };
+
+ let res = state.assignment_criteria.check_assignment_cert(
+ claimed_core_index,
+ assignment.validator,
+ &criteria::Config::from(session_info),
+ block_entry.relay_vrf_story(),
+ &assignment.cert,
+ approval_entry.backing_group(),
+ );
+
+ let tranche = match res {
+ Err(crate::criteria::InvalidAssignment) => return Ok((AssignmentCheckResult::Bad, Vec::new())),
+ Ok(tranche) => {
+ let current_tranche = state.clock.tranche_now(
+ state.slot_duration_millis,
+ block_entry.slot(),
+ );
+
+ let too_far_in_future = current_tranche + TICK_TOO_FAR_IN_FUTURE as DelayTranche;
+
+ if tranche >= too_far_in_future {
+ return Ok((AssignmentCheckResult::TooFarInFuture, Vec::new()));
+ }
+
+ tranche
+ }
+ };
+
+ let is_duplicate = approval_entry.is_assigned(assignment.validator);
+ approval_entry.import_assignment(tranche, assignment.validator, tick_now);
+
+ if is_duplicate {
+ AssignmentCheckResult::AcceptedDuplicate
+ } else {
+ AssignmentCheckResult::Accepted
+ }
+ };
+
+ // We check for approvals here because we may be late in seeing a block containing a
+ // candidate for which we have already seen approvals by the same validator.
+ //
+ // For these candidates, we will receive the assignments potentially after a corresponding
+ // approval, and so we must check for approval here.
+ //
+ // Note that this already produces actions for writing
+ // the candidate entry and any modified block entries to disk.
+ //
+ // It also produces actions to schedule wakeups for the candidate.
+ let actions = check_and_apply_full_approval(
+ state,
+ Some((assignment.block_hash, block_entry)),
+ assigned_candidate_hash,
+ candidate_entry,
+ |h, _| h == &assignment.block_hash,
+ )?;
+
+ Ok((res, actions))
+}
+
+// Check the signature and assignment preconditions of an approval vote and,
+// if valid, import it. The verdict is handed to `with_response` before the
+// (potentially heavy) import runs; returns the resulting actions alongside
+// the response value. Restored the stripped generic parameter `<T>` and the
+// `Vec<Action>` in the return type.
+fn check_and_import_approval<T>(
+	state: &State,
+	approval: IndirectSignedApprovalVote,
+	with_response: impl FnOnce(ApprovalCheckResult) -> T,
+) -> SubsystemResult<(Vec<Action>, T)> {
+	// Respond to the requester with the given verdict and bail with no actions.
+	macro_rules! respond_early {
+		($e: expr) => { {
+			let t = with_response($e);
+			return Ok((Vec::new(), t));
+		} }
+	}
+
+	let block_entry = match state.db.load_block_entry(&approval.block_hash)? {
+		Some(b) => b,
+		None => respond_early!(ApprovalCheckResult::Bad)
+	};
+
+	let session_info = match state.session_info(block_entry.session()) {
+		Some(s) => s,
+		None => {
+			tracing::warn!(target: LOG_TARGET, "Unknown session info for {}", block_entry.session());
+			respond_early!(ApprovalCheckResult::Bad)
+		}
+	};
+
+	let approved_candidate_hash = match block_entry.candidate(approval.candidate_index as usize) {
+		Some((_, h)) => *h,
+		None => respond_early!(ApprovalCheckResult::Bad)
+	};
+
+	let approval_payload = approval_signing_payload(
+		ApprovalVote(approved_candidate_hash),
+		block_entry.session(),
+	);
+
+	let pubkey = match session_info.validators.get(approval.validator as usize) {
+		Some(k) => k,
+		None => respond_early!(ApprovalCheckResult::Bad)
+	};
+
+	let approval_sig_valid = approval.signature.verify(approval_payload.as_slice(), pubkey);
+
+	if !approval_sig_valid {
+		respond_early!(ApprovalCheckResult::Bad)
+	}
+
+	let candidate_entry = match state.db.load_candidate_entry(&approved_candidate_hash)? {
+		Some(c) => c,
+		None => {
+			tracing::warn!(
+				target: LOG_TARGET,
+				"Unknown candidate entry for {}",
+				approved_candidate_hash,
+			);
+
+			respond_early!(ApprovalCheckResult::Bad)
+		}
+	};
+
+	// Don't accept approvals until assignment.
+	if candidate_entry.approval_entry(&approval.block_hash)
+		.map_or(true, |e| !e.is_assigned(approval.validator))
+	{
+		respond_early!(ApprovalCheckResult::Bad)
+	}
+
+	// importing the approval can be heavy as it may trigger acceptance for a series of blocks.
+	let t = with_response(ApprovalCheckResult::Accepted);
+
+	let actions = import_checked_approval(
+		state,
+		Some((approval.block_hash, block_entry)),
+		approved_candidate_hash,
+		candidate_entry,
+		approval.validator,
+	)?;
+
+	Ok((actions, t))
+}
+
+// Import an already-checked approval vote into a candidate entry, producing
+// actions for any blocks newly approved as a result. Restored the stripped
+// `Vec<Action>` in the return type.
+fn import_checked_approval(
+	state: &State,
+	already_loaded: Option<(Hash, BlockEntry)>,
+	candidate_hash: CandidateHash,
+	mut candidate_entry: CandidateEntry,
+	validator: ValidatorIndex,
+) -> SubsystemResult<Vec<Action>> {
+	if candidate_entry.mark_approval(validator) {
+		// already approved - nothing to do here.
+		return Ok(Vec::new());
+	}
+
+	// Check if this approval vote alters the approval state of any blocks.
+	//
+	// This may include blocks beyond the already loaded block.
+	let actions = check_and_apply_full_approval(
+		state,
+		already_loaded,
+		candidate_hash,
+		candidate_entry,
+		|_, a| a.is_assigned(validator),
+	)?;
+
+	Ok(actions)
+}
+
+// Checks the candidate for full approval under all blocks matching the given filter.
+//
+// If returning without error, is guaranteed to have produced actions
+// to write all modified block entries. It also schedules wakeups for
+// the candidate under any blocks filtered.
+//
+// Restored the stripped generics: `Vec<Action>` in the return type and
+// `Option<BlockEntry>` in the loader closure's return type.
+fn check_and_apply_full_approval(
+	state: &State,
+	mut already_loaded: Option<(Hash, BlockEntry)>,
+	candidate_hash: CandidateHash,
+	mut candidate_entry: CandidateEntry,
+	filter: impl Fn(&Hash, &ApprovalEntry) -> bool,
+) -> SubsystemResult<Vec<Action>> {
+	// We only query this max once per hash.
+	let db = &state.db;
+	let mut load_block_entry = move |block_hash| -> SubsystemResult<Option<BlockEntry>> {
+		if already_loaded.as_ref().map_or(false, |(h, _)| h == block_hash) {
+			Ok(already_loaded.take().map(|(_, c)| c))
+		} else {
+			db.load_block_entry(block_hash)
+		}
+	};
+
+	let mut newly_approved = Vec::new();
+	let mut actions = Vec::new();
+	for (block_hash, approval_entry) in candidate_entry.iter_approval_entries()
+		.into_iter()
+		.filter(|(h, a)| !a.is_approved() && filter(h, a))
+	{
+		let mut block_entry = match load_block_entry(block_hash)? {
+			None => {
+				tracing::warn!(
+					target: LOG_TARGET,
+					"Missing block entry {} referenced by candidate {}",
+					block_hash,
+					candidate_hash,
+				);
+				continue
+			}
+			Some(b) => b,
+		};
+
+		let session_info = match state.session_info(block_entry.session()) {
+			Some(s) => s,
+			None => {
+				tracing::warn!(target: LOG_TARGET, "Unknown session info for {}", block_entry.session());
+				continue
+			}
+		};
+
+		let tranche_now = state.clock.tranche_now(state.slot_duration_millis, block_entry.slot());
+		let block_tick = slot_number_to_tick(state.slot_duration_millis, block_entry.slot());
+		let no_show_duration = slot_number_to_tick(
+			state.slot_duration_millis,
+			Slot::from(u64::from(session_info.no_show_slots)),
+		);
+
+		let required_tranches = approval_checking::tranches_to_approve(
+			approval_entry,
+			candidate_entry.approvals(),
+			tranche_now,
+			block_tick,
+			no_show_duration,
+			session_info.needed_approvals as _
+		);
+
+		let now_approved = approval_checking::check_approval(
+			&candidate_entry,
+			approval_entry,
+			required_tranches.clone(),
+		);
+
+		if now_approved {
+			newly_approved.push(*block_hash);
+			block_entry.mark_approved_by_hash(&candidate_hash);
+
+			actions.push(Action::WriteBlockEntry(block_entry));
+		}
+
+		actions.extend(schedule_wakeup_action(
+			&approval_entry,
+			*block_hash,
+			candidate_hash,
+			block_tick,
+			required_tranches,
+		));
+	}
+
+	// Mark the newly-approved approval entries within the candidate entry itself
+	// before scheduling the write of the candidate entry.
+	for b in &newly_approved {
+		if let Some(a) = candidate_entry.approval_entry_mut(b) {
+			a.mark_approved();
+		}
+	}
+
+	actions.push(Action::WriteCandidateEntry(candidate_hash, candidate_entry));
+	Ok(actions)
+}
+
+// Decide whether our local assignment (if any) for this approval entry should
+// be triggered (i.e. broadcast) now, given the currently required tranches.
+fn should_trigger_assignment(
+	approval_entry: &ApprovalEntry,
+	candidate_entry: &CandidateEntry,
+	required_tranches: RequiredTranches,
+	tranche_now: DelayTranche,
+) -> bool {
+	match approval_entry.our_assignment() {
+		// We have no assignment for this candidate under this block.
+		None => false,
+		// Already triggered previously; never trigger twice.
+		Some(ref assignment) if assignment.triggered() => false,
+		Some(ref assignment) => {
+			match required_tranches {
+				// All validators are needed: trigger unless this entry is
+				// already fully approved.
+				RequiredTranches::All => !approval_checking::check_approval(
+					&candidate_entry,
+					&approval_entry,
+					RequiredTranches::All,
+				),
+				RequiredTranches::Pending {
+					maximum_broadcast,
+					clock_drift,
+					..
+				} => {
+					// NOTE(review): `clock_drift` appears to shift our view of "now"
+					// backwards so that drifted clocks don't trigger assignments
+					// early — confirm against `tranches_to_approve`'s definition.
+					let drifted_tranche_now
+						= tranche_now.saturating_sub(clock_drift as DelayTranche);
+					// Trigger only if our tranche is both needed (within the
+					// broadcast ceiling) and due by the (drift-adjusted) clock.
+					assignment.tranche() <= maximum_broadcast
+						&& assignment.tranche() <= drifted_tranche_now
+				}
+				RequiredTranches::Exact { .. } => {
+					// indicates that no new assignments are needed at the moment.
+					false
+				}
+			}
+		}
+	}
+}
+
+// Process a scheduled wakeup for the given candidate under the given relay block:
+// potentially trigger our own assignment, launch approval work, and schedule the
+// next wakeup. Restored the stripped `Vec<Action>` in the return type.
+fn process_wakeup(
+	state: &State,
+	relay_block: Hash,
+	candidate_hash: CandidateHash,
+) -> SubsystemResult<Vec<Action>> {
+	let block_entry = state.db.load_block_entry(&relay_block)?;
+	let candidate_entry = state.db.load_candidate_entry(&candidate_hash)?;
+
+	// If either is not present, we have nothing to wakeup. Might have lost a race with finality
+	let (block_entry, mut candidate_entry) = match (block_entry, candidate_entry) {
+		(Some(b), Some(c)) => (b, c),
+		_ => return Ok(Vec::new()),
+	};
+
+	let session_info = match state.session_info(block_entry.session()) {
+		Some(i) => i,
+		None => {
+			tracing::warn!(
+				target: LOG_TARGET,
+				"Missing session info for live block {} in session {}",
+				relay_block,
+				block_entry.session(),
+			);
+
+			return Ok(Vec::new())
+		}
+	};
+
+	let block_tick = slot_number_to_tick(state.slot_duration_millis, block_entry.slot());
+	let no_show_duration = slot_number_to_tick(
+		state.slot_duration_millis,
+		Slot::from(u64::from(session_info.no_show_slots)),
+	);
+
+	let tranche_now = state.clock.tranche_now(state.slot_duration_millis, block_entry.slot());
+
+	let should_trigger = {
+		let approval_entry = match candidate_entry.approval_entry(&relay_block) {
+			Some(e) => e,
+			None => return Ok(Vec::new()),
+		};
+
+		let tranches_to_approve = approval_checking::tranches_to_approve(
+			&approval_entry,
+			candidate_entry.approvals(),
+			tranche_now,
+			block_tick,
+			no_show_duration,
+			session_info.needed_approvals as _,
+		);
+
+		should_trigger_assignment(
+			&approval_entry,
+			&candidate_entry,
+			tranches_to_approve,
+			tranche_now,
+		)
+	};
+
+	let (mut actions, maybe_cert) = if should_trigger {
+		let maybe_cert = {
+			let approval_entry = candidate_entry.approval_entry_mut(&relay_block)
+				.expect("should_trigger only true if this fetched earlier; qed");
+
+			approval_entry.trigger_our_assignment(state.clock.tick_now())
+		};
+
+		let actions = vec![Action::WriteCandidateEntry(candidate_hash, candidate_entry.clone())];
+
+		(actions, maybe_cert)
+	} else {
+		(Vec::new(), None)
+	};
+
+	if let Some((cert, val_index)) = maybe_cert {
+		let indirect_cert = IndirectAssignmentCert {
+			block_hash: relay_block,
+			validator: val_index,
+			cert,
+		};
+
+		let index_in_candidate = block_entry.candidates().iter()
+			.position(|(_, h)| &candidate_hash == h);
+
+		if let Some(i) = index_in_candidate {
+			// sanity: should always be present.
+			actions.push(Action::LaunchApproval {
+				indirect_cert,
+				candidate_index: i as _,
+				session: block_entry.session(),
+				candidate: candidate_entry.candidate_receipt().clone(),
+			});
+		}
+	}
+
+	let approval_entry = candidate_entry.approval_entry(&relay_block)
+		.expect("this function returned earlier if not available; qed");
+
+	// Although we ran this earlier in the function, we need to run again because we might have
+	// imported our own assignment, which could change things.
+	let tranches_to_approve = approval_checking::tranches_to_approve(
+		&approval_entry,
+		candidate_entry.approvals(),
+		tranche_now,
+		block_tick,
+		no_show_duration,
+		session_info.needed_approvals as _,
+	);
+
+	actions.extend(schedule_wakeup_action(
+		&approval_entry,
+		relay_block,
+		candidate_hash,
+		block_tick,
+		tranches_to_approve,
+	));
+
+	Ok(actions)
+}
+
+// Launch approval work for a candidate: recover the available data, fetch the
+// historical validation code, then validate in a spawned background task,
+// issuing an approval vote request on success. Restored the stripped
+// `BackgroundRequest` generic on the sender.
+async fn launch_approval(
+	ctx: &mut impl SubsystemContext,
+	mut background_tx: mpsc::Sender<BackgroundRequest>,
+	session_index: SessionIndex,
+	candidate: &CandidateReceipt,
+	validator_index: ValidatorIndex,
+	block_hash: Hash,
+	candidate_index: usize,
+) -> SubsystemResult<()> {
+	let (a_tx, a_rx) = oneshot::channel();
+	let (code_tx, code_rx) = oneshot::channel();
+	let (context_num_tx, context_num_rx) = oneshot::channel();
+
+	ctx.send_message(AvailabilityRecoveryMessage::RecoverAvailableData(
+		candidate.clone(),
+		session_index,
+		a_tx,
+	).into()).await;
+
+	ctx.send_message(
+		ChainApiMessage::BlockNumber(candidate.descriptor.relay_parent, context_num_tx).into()
+	).await;
+
+	let in_context_number = match context_num_rx.await?
+		.map_err(|e| SubsystemError::with_origin("chain-api", e))?
+	{
+		Some(n) => n,
+		None => return Ok(()),
+	};
+
+	ctx.send_message(
+		RuntimeApiMessage::Request(
+			block_hash,
+			RuntimeApiRequest::HistoricalValidationCode(
+				candidate.descriptor.para_id,
+				in_context_number,
+				code_tx,
+			),
+		).into()
+	).await;
+
+	let candidate = candidate.clone();
+	let background = async move {
+		let available_data = match a_rx.await {
+			Err(_) => return,
+			Ok(Ok(a)) => a,
+			Ok(Err(RecoveryError::Unavailable)) => {
+				// do nothing. we'll just be a no-show and that'll cause others to rise up.
+				return;
+			}
+			Ok(Err(RecoveryError::Invalid)) => {
+				// TODO: dispute. Either the merkle trie is bad or the erasure root is.
+				// https://github.com/paritytech/polkadot/issues/2176
+				return;
+			}
+		};
+
+		let validation_code = match code_rx.await {
+			Err(_) => return,
+			Ok(Err(_)) => return,
+			Ok(Ok(Some(code))) => code,
+			Ok(Ok(None)) => {
+				tracing::warn!(
+					target: LOG_TARGET,
+					"Validation code unavailable for block {:?} in the state of block {:?} (a recent descendant)",
+					candidate.descriptor.relay_parent,
+					block_hash,
+				);
+
+				// No dispute necessary, as this indicates that the chain is not behaving
+				// according to expectations.
+				return;
+			}
+		};
+
+		let (val_tx, val_rx) = oneshot::channel();
+
+		let _ = background_tx.send(BackgroundRequest::CandidateValidation(
+			available_data.validation_data,
+			validation_code,
+			candidate.descriptor,
+			available_data.pov,
+			val_tx,
+		)).await;
+
+		match val_rx.await {
+			Err(_) => return,
+			Ok(Ok(ValidationResult::Valid(_, _))) => {
+				// Validation checked out. Issue an approval command. If the underlying service is unreachable,
+				// then there isn't anything we can do.
+
+				let _ = background_tx.send(BackgroundRequest::ApprovalVote(ApprovalVoteRequest {
+					validator_index,
+					block_hash,
+					candidate_index,
+				})).await;
+			}
+			Ok(Ok(ValidationResult::Invalid(_))) => {
+				// TODO: issue dispute, but not for timeouts.
+				// https://github.com/paritytech/polkadot/issues/2176
+			}
+			Ok(Err(_)) => return, // internal error.
+		}
+	};
+
+	ctx.spawn("approval-checks", Box::pin(background)).await
+}
+
+// Issue and import a local approval vote. Should only be invoked after approval checks
+// have been done. Restored the stripped `Vec<Action>` in the return type.
+async fn issue_approval(
+	ctx: &mut impl SubsystemContext,
+	state: &State,
+	request: ApprovalVoteRequest,
+) -> SubsystemResult<Vec<Action>> {
+	let ApprovalVoteRequest { validator_index, block_hash, candidate_index } = request;
+
+	let block_entry = match state.db.load_block_entry(&block_hash)? {
+		Some(b) => b,
+		None => return Ok(Vec::new()), // not a cause for alarm - just lost a race with pruning, most likely.
+	};
+
+	let session_info = match state.session_info(block_entry.session()) {
+		Some(s) => s,
+		None => {
+			tracing::warn!(
+				target: LOG_TARGET,
+				"Missing session info for live block {} in session {}",
+				block_hash,
+				block_entry.session(),
+			);
+
+			return Ok(Vec::new());
+		}
+	};
+
+	let candidate_hash = match block_entry.candidate(candidate_index) {
+		Some((_, h)) => h.clone(),
+		None => {
+			tracing::warn!(
+				target: LOG_TARGET,
+				"Received malformed request to approve out-of-bounds candidate index {} included at block {:?}",
+				candidate_index,
+				block_hash,
+			);
+
+			return Ok(Vec::new());
+		}
+	};
+
+	let candidate_entry = match state.db.load_candidate_entry(&candidate_hash)? {
+		Some(c) => c,
+		None => {
+			tracing::warn!(
+				target: LOG_TARGET,
+				"Missing entry for candidate index {} included at block {:?}",
+				candidate_index,
+				block_hash,
+			);
+
+			return Ok(Vec::new());
+		}
+	};
+
+	let validator_pubkey = match session_info.validators.get(validator_index as usize) {
+		Some(p) => p,
+		None => {
+			tracing::warn!(
+				target: LOG_TARGET,
+				"Validator index {} out of bounds in session {}",
+				validator_index,
+				block_entry.session(),
+			);
+
+			return Ok(Vec::new());
+		}
+	};
+
+	let sig = match sign_approval(
+		&state.keystore,
+		&validator_pubkey,
+		candidate_hash,
+		block_entry.session(),
+	) {
+		Some(sig) => sig,
+		None => {
+			tracing::warn!(
+				target: LOG_TARGET,
+				"Could not issue approval signature with validator index {} in session {}. Assignment key present but not validator key?",
+				validator_index,
+				block_entry.session(),
+			);
+
+			return Ok(Vec::new());
+		}
+	};
+
+	let actions = import_checked_approval(
+		state,
+		Some((block_hash, block_entry)),
+		candidate_hash,
+		candidate_entry,
+		validator_index as _,
+	)?;
+
+	// dispatch to approval distribution.
+	ctx.send_message(ApprovalDistributionMessage::DistributeApproval(IndirectSignedApprovalVote {
+		block_hash,
+		candidate_index: candidate_index as _,
+		validator: validator_index,
+		signature: sig,
+	}).into()).await;
+
+	Ok(actions)
+}
+
+// Sign an approval vote. Fails if the key isn't present in the store.
+// Restored the stripped generics: `Option<ValidatorSignature>` return type and
+// the `ValidatorPair` turbofish (NOTE(review): confirm `ValidatorPair` is in
+// the file's top-of-file imports, which are outside this hunk).
+fn sign_approval(
+	keystore: &LocalKeystore,
+	public: &ValidatorId,
+	candidate_hash: CandidateHash,
+	session_index: SessionIndex,
+) -> Option<ValidatorSignature> {
+	let key = keystore.key_pair::<ValidatorPair>(public).ok()?;
+
+	let payload = approval_signing_payload(
+		ApprovalVote(candidate_hash),
+		session_index,
+	);
+
+	Some(key.sign(&payload[..]))
+}
diff --git a/node/core/approval-voting/src/persisted_entries.rs b/node/core/approval-voting/src/persisted_entries.rs
new file mode 100644
index 000000000000..fce648259099
--- /dev/null
+++ b/node/core/approval-voting/src/persisted_entries.rs
@@ -0,0 +1,412 @@
+// Copyright 2020 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see .
+
+//! Entries pertaining to approval which need to be persisted.
+//!
+//! The actual persisting of data is handled by the `approval_db` module.
+//! Within that context, things are plain-old-data. Within this module,
+//! data and logic are intertwined.
+
+use polkadot_node_primitives::approval::{DelayTranche, RelayVRFStory, AssignmentCert};
+use polkadot_primitives::v1::{
+ ValidatorIndex, CandidateReceipt, SessionIndex, GroupIndex, CoreIndex,
+ Hash, CandidateHash,
+};
+use sp_consensus_slots::Slot;
+
+use std::collections::BTreeMap;
+use bitvec::{slice::BitSlice, vec::BitVec, order::Lsb0 as BitOrderLsb0};
+
+use super::time::Tick;
+use super::criteria::OurAssignment;
+
+/// Metadata regarding a specific tranche of assignments for a specific candidate.
+#[derive(Debug, Clone, PartialEq)]
+pub struct TrancheEntry {
+	// The delay tranche this entry describes.
+	tranche: DelayTranche,
+	// Assigned validators, and the instant we received their assignment, rounded
+	// to the nearest tick.
+	assignments: Vec<(ValidatorIndex, Tick)>,
+}
+
+impl TrancheEntry {
+	/// Get the tranche of this entry.
+	pub fn tranche(&self) -> DelayTranche {
+		self.tranche
+	}
+
+	/// Get the assignments for this entry, as (validator, receipt-tick) pairs.
+	pub fn assignments(&self) -> &[(ValidatorIndex, Tick)] {
+		&self.assignments
+	}
+}
+
+// Conversion from the plain-old-data DB representation. Restored the stripped
+// source type in the `From<…>` header.
+impl From<crate::approval_db::v1::TrancheEntry> for TrancheEntry {
+	fn from(entry: crate::approval_db::v1::TrancheEntry) -> Self {
+		TrancheEntry {
+			tranche: entry.tranche,
+			assignments: entry.assignments.into_iter().map(|(v, t)| (v, t.into())).collect(),
+		}
+	}
+}
+
+// Conversion back into the plain-old-data DB representation. Restored the
+// stripped source type in the `From<…>` header.
+impl From<TrancheEntry> for crate::approval_db::v1::TrancheEntry {
+	fn from(entry: TrancheEntry) -> Self {
+		Self {
+			tranche: entry.tranche,
+			assignments: entry.assignments.into_iter().map(|(v, t)| (v, t.into())).collect(),
+		}
+	}
+}
+
+/// Metadata regarding approval of a particular candidate within the context of some
+/// particular block. Restored the stripped field generics (`Vec<TrancheEntry>`,
+/// `Option<OurAssignment>`, `BitVec<BitOrderLsb0, u8>`).
+#[derive(Debug, Clone, PartialEq)]
+pub struct ApprovalEntry {
+	// Assignments grouped by tranche, sorted ascending by tranche.
+	tranches: Vec<TrancheEntry>,
+	backing_group: GroupIndex,
+	our_assignment: Option<OurAssignment>,
+	// `n_validators` bits.
+	assignments: BitVec<BitOrderLsb0, u8>,
+	approved: bool,
+}
+
+impl ApprovalEntry {
+	// Access our assignment for this approval entry.
+	pub fn our_assignment(&self) -> Option<&OurAssignment> {
+		self.our_assignment.as_ref()
+	}
+
+	// Note that our assignment is triggered. No-op if already triggered.
+	// Returns the cert and our validator index on the first trigger only.
+	pub fn trigger_our_assignment(&mut self, tick_now: Tick)
+		-> Option<(AssignmentCert, ValidatorIndex)>
+	{
+		let our = self.our_assignment.as_mut().and_then(|a| {
+			if a.triggered() { return None }
+			a.mark_triggered();
+
+			Some(a.clone())
+		});
+
+		our.map(|a| {
+			// Import our own assignment like any other so it counts toward tranches.
+			self.import_assignment(a.tranche(), a.validator_index(), tick_now);
+
+			(a.cert().clone(), a.validator_index())
+		})
+	}
+
+	/// Whether a validator is already assigned.
+	pub fn is_assigned(&self, validator_index: ValidatorIndex) -> bool {
+		self.assignments.get(validator_index as usize).map(|b| *b).unwrap_or(false)
+	}
+
+	/// Import an assignment. No-op if already assigned on the same tranche.
+	pub fn import_assignment(
+		&mut self,
+		tranche: DelayTranche,
+		validator_index: ValidatorIndex,
+		tick_now: Tick,
+	) {
+		// linear search probably faster than binary. not many tranches typically.
+		let idx = match self.tranches.iter().position(|t| t.tranche >= tranche) {
+			Some(pos) => {
+				// Insert a fresh tranche entry if this exact tranche is absent.
+				if self.tranches[pos].tranche > tranche {
+					self.tranches.insert(pos, TrancheEntry {
+						tranche,
+						assignments: Vec::new(),
+					});
+				}
+
+				pos
+			}
+			None => {
+				self.tranches.push(TrancheEntry {
+					tranche,
+					assignments: Vec::new(),
+				});
+
+				self.tranches.len() - 1
+			}
+		};
+
+		self.tranches[idx].assignments.push((validator_index, tick_now));
+		self.assignments.set(validator_index as _, true);
+	}
+
+	// Produce a bitvec indicating the assignments of all validators up to and
+	// including `tranche`. Restored the stripped `BitVec<BitOrderLsb0, u8>` return.
+	pub fn assignments_up_to(&self, tranche: DelayTranche) -> BitVec<BitOrderLsb0, u8> {
+		self.tranches.iter()
+			.take_while(|e| e.tranche <= tranche)
+			.fold(bitvec::bitvec![BitOrderLsb0, u8; 0; self.assignments.len()], |mut a, e| {
+				for &(v, _) in &e.assignments {
+					a.set(v as _, true);
+				}
+
+				a
+			})
+	}
+
+	/// Whether the approval entry is approved
+	pub fn is_approved(&self) -> bool {
+		self.approved
+	}
+
+	/// Mark the approval entry as approved.
+	pub fn mark_approved(&mut self) {
+		self.approved = true;
+	}
+
+	/// Access the tranches.
+	pub fn tranches(&self) -> &[TrancheEntry] {
+		&self.tranches
+	}
+
+	/// Get the number of validators in this approval entry.
+	pub fn n_validators(&self) -> usize {
+		self.assignments.len()
+	}
+
+	/// Get the backing group index of the approval entry.
+	pub fn backing_group(&self) -> GroupIndex {
+		self.backing_group
+	}
+
+	/// For tests: set our assignment.
+	#[cfg(test)]
+	pub fn set_our_assignment(&mut self, our_assignment: OurAssignment) {
+		self.our_assignment = Some(our_assignment);
+	}
+}
+
+// Conversion from the plain-old-data DB representation. Restored the stripped
+// source type in the `From<…>` header.
+impl From<crate::approval_db::v1::ApprovalEntry> for ApprovalEntry {
+	fn from(entry: crate::approval_db::v1::ApprovalEntry) -> Self {
+		ApprovalEntry {
+			tranches: entry.tranches.into_iter().map(Into::into).collect(),
+			backing_group: entry.backing_group,
+			our_assignment: entry.our_assignment.map(Into::into),
+			assignments: entry.assignments,
+			approved: entry.approved,
+		}
+	}
+}
+
+// Conversion back into the plain-old-data DB representation. Restored the
+// stripped source type in the `From<…>` header.
+impl From<ApprovalEntry> for crate::approval_db::v1::ApprovalEntry {
+	fn from(entry: ApprovalEntry) -> Self {
+		Self {
+			tranches: entry.tranches.into_iter().map(Into::into).collect(),
+			backing_group: entry.backing_group,
+			our_assignment: entry.our_assignment.map(Into::into),
+			assignments: entry.assignments,
+			approved: entry.approved,
+		}
+	}
+}
+
+/// Metadata regarding approval of a particular candidate. Restored the stripped
+/// field generics (`BTreeMap<Hash, ApprovalEntry>`, `BitVec<BitOrderLsb0, u8>`).
+#[derive(Debug, Clone, PartialEq)]
+pub struct CandidateEntry {
+	candidate: CandidateReceipt,
+	session: SessionIndex,
+	// Assignments are based on blocks, so we need to track assignments separately
+	// based on the block we are looking at.
+	block_assignments: BTreeMap<Hash, ApprovalEntry>,
+	// One bit per validator: whether they have approved this candidate.
+	approvals: BitVec<BitOrderLsb0, u8>,
+}
+
+impl CandidateEntry {
+	/// Access the bit-vec of approvals. Restored the stripped
+	/// `BitSlice<BitOrderLsb0, u8>` return type.
+	pub fn approvals(&self) -> &BitSlice<BitOrderLsb0, u8> {
+		&self.approvals
+	}
+
+	/// Note that a given validator has approved. Return the previous approval state.
+	pub fn mark_approval(&mut self, validator: ValidatorIndex) -> bool {
+		let prev = self.approvals.get(validator as usize).map(|b| *b).unwrap_or(false);
+		self.approvals.set(validator as usize, true);
+		prev
+	}
+
+	/// Get the candidate receipt.
+	pub fn candidate_receipt(&self) -> &CandidateReceipt {
+		&self.candidate
+	}
+
+	/// Get the approval entry, mutably, for this candidate under a specific block.
+	pub fn approval_entry_mut(&mut self, block_hash: &Hash) -> Option<&mut ApprovalEntry> {
+		self.block_assignments.get_mut(block_hash)
+	}
+
+	/// Get the approval entry for this candidate under a specific block.
+	pub fn approval_entry(&self, block_hash: &Hash) -> Option<&ApprovalEntry> {
+		self.block_assignments.get(block_hash)
+	}
+
+	/// Iterate over approval entries, keyed by the hash of the block they are under.
+	/// Restored the stripped `Item = (&Hash, &ApprovalEntry)` in the return type.
+	pub fn iter_approval_entries(&self) -> impl IntoIterator<Item = (&Hash, &ApprovalEntry)> {
+		self.block_assignments.iter()
+	}
+
+	/// For tests: insert an approval entry under the given block hash.
+	#[cfg(test)]
+	pub fn add_approval_entry(
+		&mut self,
+		block_hash: Hash,
+		approval_entry: ApprovalEntry,
+	) {
+		self.block_assignments.insert(block_hash, approval_entry);
+	}
+}
+
+// Conversion from the plain-old-data DB representation. Restored the stripped
+// source type in the `From<…>` header.
+impl From<crate::approval_db::v1::CandidateEntry> for CandidateEntry {
+	fn from(entry: crate::approval_db::v1::CandidateEntry) -> Self {
+		CandidateEntry {
+			candidate: entry.candidate,
+			session: entry.session,
+			block_assignments: entry.block_assignments.into_iter().map(|(h, ae)| (h, ae.into())).collect(),
+			approvals: entry.approvals,
+		}
+	}
+}
+
+// Conversion back into the plain-old-data DB representation. Restored the
+// stripped source type in the `From<…>` header.
+impl From<CandidateEntry> for crate::approval_db::v1::CandidateEntry {
+	fn from(entry: CandidateEntry) -> Self {
+		Self {
+			candidate: entry.candidate,
+			session: entry.session,
+			block_assignments: entry.block_assignments.into_iter().map(|(h, ae)| (h, ae.into())).collect(),
+			approvals: entry.approvals,
+		}
+	}
+}
+
+/// Metadata regarding approval of a particular block, by way of approval of the
+/// candidates contained within it. Restored the stripped field generics
+/// (`BitVec<BitOrderLsb0, u8>`, `Vec<Hash>`).
+#[derive(Debug, Clone, PartialEq)]
+pub struct BlockEntry {
+	block_hash: Hash,
+	session: SessionIndex,
+	slot: Slot,
+	relay_vrf_story: RelayVRFStory,
+	// The candidates included as-of this block and the index of the core they are
+	// leaving. Sorted ascending by core index.
+	candidates: Vec<(CoreIndex, CandidateHash)>,
+	// A bitfield where the i'th bit corresponds to the i'th candidate in `candidates`.
+	// The i'th bit is `true` iff the candidate has been approved in the context of this
+	// block. The block can be considered approved if the bitfield has all bits set to `true`.
+	approved_bitfield: BitVec<BitOrderLsb0, u8>,
+	// Hashes of the direct descendants of this block, if known.
+	children: Vec<Hash>,
+}
+
+impl BlockEntry {
+	/// Mark a candidate as fully approved in the bitfield.
+	/// No-op if the candidate is not present in this block.
+	pub fn mark_approved_by_hash(&mut self, candidate_hash: &CandidateHash) {
+		if let Some(p) = self.candidates.iter().position(|(_, h)| h == candidate_hash) {
+			self.approved_bitfield.set(p, true);
+		}
+	}
+
+	/// Whether the block entry is fully approved.
+	pub fn is_fully_approved(&self) -> bool {
+		self.approved_bitfield.all()
+	}
+
+	/// For tests: get the block hash.
+	#[cfg(test)]
+	pub fn block_hash(&self) -> Hash {
+		self.block_hash
+	}
+
+	/// For tests: whether the given candidate is approved under this block.
+	#[cfg(test)]
+	pub fn is_candidate_approved(&self, candidate_hash: &CandidateHash) -> bool {
+		self.candidates.iter().position(|(_, h)| h == candidate_hash)
+			.and_then(|p| self.approved_bitfield.get(p).map(|b| *b))
+			.unwrap_or(false)
+	}
+
+	/// For tests: Add a candidate to the block entry. Returns the
+	/// index where the candidate was added.
+	///
+	/// Panics if the core is already used.
+	#[cfg(test)]
+	pub fn add_candidate(&mut self, core: CoreIndex, candidate_hash: CandidateHash) -> usize {
+		// `unwrap_err` panics if the core is already present - the documented contract.
+		let pos = self.candidates
+			.binary_search_by_key(&core, |(c, _)| *c)
+			.unwrap_err();
+
+		self.candidates.insert(pos, (core, candidate_hash));
+
+		// bug in bitvec?
+		// NOTE(review): `insert` at `len` appears to be worked around with an
+		// explicit `push` here - confirm against the bitvec version in use.
+		if pos < self.approved_bitfield.len() {
+			self.approved_bitfield.insert(pos, false);
+		} else {
+			self.approved_bitfield.push(false);
+		}
+
+		pos
+	}
+
+	/// Get the slot of the block.
+	pub fn slot(&self) -> Slot {
+		self.slot
+	}
+
+	/// Get the relay-vrf-story of the block.
+	pub fn relay_vrf_story(&self) -> RelayVRFStory {
+		self.relay_vrf_story.clone()
+	}
+
+	/// Get the session index of the block.
+	pub fn session(&self) -> SessionIndex {
+		self.session
+	}
+
+	/// Get the i'th candidate.
+	pub fn candidate(&self, i: usize) -> Option<&(CoreIndex, CandidateHash)> {
+		self.candidates.get(i)
+	}
+
+	/// Access the underlying candidates as a slice.
+	pub fn candidates(&self) -> &[(CoreIndex, CandidateHash)] {
+		&self.candidates
+	}
+}
+
+// Conversion from the plain-old-data DB representation. Restored the stripped
+// source type in the `From<…>` header.
+impl From<crate::approval_db::v1::BlockEntry> for BlockEntry {
+	fn from(entry: crate::approval_db::v1::BlockEntry) -> Self {
+		BlockEntry {
+			block_hash: entry.block_hash,
+			session: entry.session,
+			slot: entry.slot,
+			relay_vrf_story: RelayVRFStory(entry.relay_vrf_story),
+			candidates: entry.candidates,
+			approved_bitfield: entry.approved_bitfield,
+			children: entry.children,
+		}
+	}
+}
+
+// Conversion back into the plain-old-data DB representation. Restored the
+// stripped source type in the `From<…>` header.
+impl From<BlockEntry> for crate::approval_db::v1::BlockEntry {
+	fn from(entry: BlockEntry) -> Self {
+		Self {
+			block_hash: entry.block_hash,
+			session: entry.session,
+			slot: entry.slot,
+			relay_vrf_story: entry.relay_vrf_story.0,
+			candidates: entry.candidates,
+			approved_bitfield: entry.approved_bitfield,
+			children: entry.children,
+		}
+	}
+}
diff --git a/node/core/approval-voting/src/tests.rs b/node/core/approval-voting/src/tests.rs
new file mode 100644
index 000000000000..95e23998f153
--- /dev/null
+++ b/node/core/approval-voting/src/tests.rs
@@ -0,0 +1,1750 @@
+// Copyright 2021 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+use super::*;
+use polkadot_primitives::v1::{CoreIndex, GroupIndex, ValidatorSignature};
+use polkadot_node_primitives::approval::{
+ AssignmentCert, AssignmentCertKind, VRFOutput, VRFProof,
+ RELAY_VRF_MODULO_CONTEXT, DelayTranche,
+};
+use polkadot_node_subsystem_test_helpers::make_subsystem_context;
+use polkadot_subsystem::messages::AllMessages;
+use sp_core::testing::TaskExecutor;
+
+use parking_lot::Mutex;
+use bitvec::order::Lsb0 as BitOrderLsb0;
+use std::pin::Pin;
+use std::sync::Arc;
+use sp_keyring::sr25519::Keyring as Sr25519Keyring;
+use assert_matches::assert_matches;
+
+const SLOT_DURATION_MILLIS: u64 = 5000;
+
+// Convert a slot number into the approval-voting tick it corresponds to,
+// using the test slot duration. (`Into<Slot>` bound restored - stripped by extraction.)
+fn slot_to_tick(t: impl Into<Slot>) -> crate::time::Tick {
+	crate::time::slot_number_to_tick(SLOT_DURATION_MILLIS, t.into())
+}
+
+/// Test clock whose state is shared behind `Arc<Mutex<..>>` so tests can
+/// advance time and observe wakeups. (Generics restored - stripped by extraction.)
+#[derive(Default)]
+struct MockClock {
+	// Shared with the futures returned by `Clock::wait`.
+	inner: Arc<Mutex<MockClockInner>>,
+}
+
+impl MockClock {
+ fn new(tick: Tick) -> Self {
+ // Start from the default clock (tick 0, no wakeups) and advance it;
+ // `set_tick` also fires any pending wakeups up to `tick` (none here).
+ let me = Self::default();
+ me.inner.lock().set_tick(tick);
+ me
+ }
+}
+
+// `Clock` implementation backed by the shared mock state.
+// (`Pin<Box<dyn Future<Output = ()> + Send + 'static>>` restored - stripped by extraction.)
+impl Clock for MockClock {
+	fn tick_now(&self) -> Tick {
+		self.inner.lock().tick
+	}
+
+	fn wait(&self, tick: Tick) -> Pin<Box<dyn Future<Output = ()> + Send + 'static>> {
+		// Register a pre-emptive wakeup: resolves immediately if `tick` has
+		// already passed, otherwise when the clock reaches it.
+		let rx = self.inner.lock().register_wakeup(tick, true);
+
+		Box::pin(async move {
+			rx.await.expect("i exist in a timeless void. yet, i remain");
+		})
+	}
+}
+
+// This mock clock allows us to manipulate the time and
+// be notified when wakeups have been triggered.
+#[derive(Default)]
+struct MockClockInner {
+ tick: Tick,
+ // Pending wakeups, kept sorted ascending by tick (maintained by
+ // `register_wakeup`); each sender fires once when its tick is reached.
+ wakeups: Vec<(Tick, oneshot::Sender<()>)>,
+}
+
+impl MockClockInner {
+ // Advance the clock and fire every wakeup registered at or before `tick`.
+ fn set_tick(&mut self, tick: Tick) {
+ self.tick = tick;
+ self.wakeup_all(tick);
+ }
+
+ fn wakeup_all(&mut self, up_to: Tick) {
+ // This finds the position of the first wakeup after
+ // the given tick, or the end of the map.
+ //
+ // NOTE(review): `binary_search_by_key` returns an arbitrary index among
+ // equal keys, so if several wakeups sit at exactly `up_to + 1`, some of
+ // them could be drained one tick early - confirm duplicate ticks at the
+ // boundary cannot occur, or drain via `partition_point`.
+ let drain_up_to = self.wakeups.binary_search_by_key(
+ &(up_to + 1),
+ |w| w.0,
+ ).unwrap_or_else(|i| i);
+
+ for (_, wakeup) in self.wakeups.drain(..drain_up_to) {
+ // Receiver may already have been dropped; ignore send errors.
+ let _ = wakeup.send(());
+ }
+ }
+
+ // If `pre_emptive` is true, we compare the given tick to the internal
+ // tick of the clock for an early return.
+ //
+ // Otherwise, the wakeup will only trigger alongside another wakeup of
+ // equal or greater tick.
+ //
+ // When the pre-emptive wakeup is disabled, this can be used in combination with
+ // a preceding call to `set_tick` to wait until some other wakeup at that same tick
+ // has been triggered.
+ fn register_wakeup(&mut self, tick: Tick, pre_emptive: bool) -> oneshot::Receiver<()> {
+ let (tx, rx) = oneshot::channel();
+
+ // Insert while keeping `wakeups` sorted ascending by tick.
+ let pos = self.wakeups.binary_search_by_key(
+ &tick,
+ |w| w.0,
+ ).unwrap_or_else(|i| i);
+
+ self.wakeups.insert(pos, (tick, tx));
+
+ if pre_emptive {
+ // if `tick > self.tick`, this won't wake up the new
+ // listener.
+ self.wakeup_all(self.tick);
+ }
+
+ rx
+ }
+}
+
+struct MockAssignmentCriteria(Compute, Check);
+
+// Delegate both trait methods to the stored closures, ignoring all inputs.
+// (Generic parameters and the `HashMap`/`Result` type arguments restored -
+// stripped by extraction.)
+impl<Compute, Check> AssignmentCriteria for MockAssignmentCriteria<Compute, Check>
+where
+	Compute: Fn() -> HashMap<polkadot_primitives::v1::CoreIndex, criteria::OurAssignment>,
+	Check: Fn() -> Result<DelayTranche, criteria::InvalidAssignment>
+{
+	fn compute_assignments(
+		&self,
+		_keystore: &LocalKeystore,
+		_relay_vrf_story: polkadot_node_primitives::approval::RelayVRFStory,
+		_config: &criteria::Config,
+		_leaving_cores: Vec<(polkadot_primitives::v1::CoreIndex, polkadot_primitives::v1::GroupIndex)>,
+	) -> HashMap<polkadot_primitives::v1::CoreIndex, criteria::OurAssignment> {
+		self.0()
+	}
+
+	fn check_assignment_cert(
+		&self,
+		_claimed_core_index: polkadot_primitives::v1::CoreIndex,
+		_validator_index: ValidatorIndex,
+		_config: &criteria::Config,
+		_relay_vrf_story: polkadot_node_primitives::approval::RelayVRFStory,
+		_assignment: &polkadot_node_primitives::approval::AssignmentCert,
+		_backing_group: polkadot_primitives::v1::GroupIndex,
+	) -> Result<DelayTranche, criteria::InvalidAssignment> {
+		self.1()
+	}
+}
+
+// Convenience constructor: a mock whose compute side returns an empty map
+// (via `Default::default` as a fn pointer) and whose check side is `f`.
+// (Generic parameters restored - stripped by extraction.)
+impl<F> MockAssignmentCriteria<
+	fn() -> HashMap<polkadot_primitives::v1::CoreIndex, criteria::OurAssignment>,
+	F,
+> {
+	fn check_only(f: F) -> Self {
+		MockAssignmentCriteria(Default::default, f)
+	}
+}
+
+/// In-memory stand-in for the approval DB used by the tests.
+/// (`HashMap` key/value parameters restored - stripped by extraction.)
+#[derive(Default)]
+struct TestStore {
+	block_entries: HashMap<Hash, BlockEntry>,
+	candidate_entries: HashMap<CandidateHash, CandidateEntry>,
+}
+
+// `DBReader` over the in-memory maps; entries are cloned out.
+// (`SubsystemResult<Option<..>>` restored - stripped by extraction.)
+impl DBReader for TestStore {
+	fn load_block_entry(
+		&self,
+		block_hash: &Hash,
+	) -> SubsystemResult<Option<BlockEntry>> {
+		Ok(self.block_entries.get(block_hash).cloned())
+	}
+
+	fn load_candidate_entry(
+		&self,
+		candidate_hash: &CandidateHash,
+	) -> SubsystemResult<Option<CandidateEntry>> {
+		Ok(self.candidate_entries.get(candidate_hash).cloned())
+	}
+}
+
+// Baseline test `State`: empty session window, fresh in-memory keystore,
+// empty `TestStore`, mock clock at tick 0, and an assignment checker that
+// accepts everything at tranche 0.
+fn blank_state() -> State {
+ State {
+ session_window: import::RollingSessionWindow::default(),
+ keystore: LocalKeystore::in_memory(),
+ slot_duration_millis: SLOT_DURATION_MILLIS,
+ db: TestStore::default(),
+ clock: Box::new(MockClock::default()),
+ assignment_criteria: Box::new(MockAssignmentCriteria::check_only(|| { Ok(0) })),
+ }
+}
+
+// A `State` whose rolling session window contains exactly one session
+// (`index` mapped to `info`); everything else as in `blank_state`.
+fn single_session_state(index: SessionIndex, info: SessionInfo)
+ -> State
+{
+ State {
+ session_window: import::RollingSessionWindow {
+ earliest_session: Some(index),
+ session_info: vec![info],
+ },
+ ..blank_state()
+ }
+}
+
+// Build a structurally-valid assignment cert of the given kind whose VRF
+// signs over unrelated bytes ("test-garbage") with a throwaway keypair - it
+// will not verify against any real relay-VRF story.
+fn garbage_assignment_cert(kind: AssignmentCertKind) -> AssignmentCert {
+ let ctx = schnorrkel::signing_context(RELAY_VRF_MODULO_CONTEXT);
+ let msg = b"test-garbage";
+ let mut prng = rand_core::OsRng;
+ let keypair = schnorrkel::Keypair::generate_with(&mut prng);
+ let (inout, proof, _) = keypair.vrf_sign(ctx.bytes(msg));
+ let out = inout.to_output();
+
+ AssignmentCert {
+ kind,
+ vrf: (VRFOutput(out), VRFProof(proof)),
+ }
+}
+
+// Produce a real approval-vote signature for `candidate_hash` in
+// `session_index` with the given test keyring key; the signed payload is
+// built by `super::approval_signing_payload`.
+fn sign_approval(
+ key: Sr25519Keyring,
+ candidate_hash: CandidateHash,
+ session_index: SessionIndex,
+) -> ValidatorSignature {
+ key.sign(&super::approval_signing_payload(ApprovalVote(candidate_hash), session_index)).into()
+}
+
+/// Knobs for building the test `State` in `some_state`.
+/// (`Vec` element types restored - stripped by extraction.)
+struct StateConfig {
+	session_index: SessionIndex,
+	slot: Slot,
+	tick: Tick,
+	validators: Vec<Sr25519Keyring>,
+	validator_groups: Vec<Vec<ValidatorIndex>>,
+	needed_approvals: u32,
+	no_show_slots: u32,
+}
+
+impl Default for StateConfig {
+ fn default() -> Self {
+ StateConfig {
+ session_index: 1,
+ slot: Slot::from(0),
+ tick: 0,
+ // Two validators, each alone in its own backing group.
+ validators: vec![Sr25519Keyring::Alice, Sr25519Keyring::Bob],
+ validator_groups: vec![vec![0], vec![1]],
+ needed_approvals: 1,
+ no_show_slots: 2,
+ }
+ }
+}
+
+// one block with one candidate. Alice and Bob are in the assignment keys.
+fn some_state(config: StateConfig) -> State {
+ let StateConfig {
+ session_index,
+ slot,
+ tick,
+ validators,
+ validator_groups,
+ needed_approvals,
+ no_show_slots,
+ } = config;
+
+ let n_validators = validators.len();
+
+ // Single-session state with a mock clock fast-forwarded to `tick`;
+ // the same test keys serve as validator, discovery, and assignment keys.
+ let mut state = State {
+ clock: Box::new(MockClock::new(tick)),
+ ..single_session_state(session_index, SessionInfo {
+ validators: validators.iter().map(|v| v.public().into()).collect(),
+ discovery_keys: validators.iter().map(|v| v.public().into()).collect(),
+ assignment_keys: validators.iter().map(|v| v.public().into()).collect(),
+ validator_groups: validator_groups.clone(),
+ n_cores: validator_groups.len() as _,
+ zeroth_delay_tranche_width: 5,
+ relay_vrf_modulo_samples: 3,
+ n_delay_tranches: 50,
+ no_show_slots,
+ needed_approvals,
+ ..Default::default()
+ })
+ };
+ let core_index = 0.into();
+
+ // Fixed hashes referenced throughout the tests.
+ let block_hash = Hash::repeat_byte(0x01);
+ let candidate_hash = CandidateHash(Hash::repeat_byte(0xCC));
+
+ add_block(
+ &mut state.db,
+ block_hash,
+ session_index,
+ slot,
+ );
+
+ add_candidate_to_block(
+ &mut state.db,
+ block_hash,
+ candidate_hash,
+ n_validators,
+ core_index,
+ GroupIndex(0),
+ );
+
+ state
+}
+
+// Insert a `BlockEntry` for `block_hash` into the test store with no
+// candidates and default (empty) VRF story, approval bitfield, and children.
+fn add_block(
+ db: &mut TestStore,
+ block_hash: Hash,
+ session: SessionIndex,
+ slot: Slot,
+) {
+ db.block_entries.insert(
+ block_hash,
+ approval_db::v1::BlockEntry {
+ block_hash,
+ session,
+ slot,
+ candidates: Vec::new(),
+ relay_vrf_story: Default::default(),
+ approved_bitfield: Default::default(),
+ children: Default::default(),
+ }.into(),
+ );
+}
+
+// Register `candidate_hash` as occupying `core` in `block_hash`: creates the
+// candidate entry on first use, attaches a blank approval entry for this
+// block, and records the candidate in the block entry.
+fn add_candidate_to_block(
+ db: &mut TestStore,
+ block_hash: Hash,
+ candidate_hash: CandidateHash,
+ n_validators: usize,
+ core: CoreIndex,
+ backing_group: GroupIndex,
+) {
+ // Work on a clone; the modified entry is written back at the end.
+ let mut block_entry = db.block_entries.get(&block_hash).unwrap().clone();
+
+ let candidate_entry = db.candidate_entries
+ .entry(candidate_hash)
+ .or_insert_with(|| approval_db::v1::CandidateEntry {
+ session: block_entry.session(),
+ block_assignments: Default::default(),
+ candidate: CandidateReceipt::default(),
+ approvals: bitvec::bitvec![BitOrderLsb0, u8; 0; n_validators],
+ }.into());
+
+ block_entry.add_candidate(core, candidate_hash);
+
+ candidate_entry.add_approval_entry(
+ block_hash,
+ approval_db::v1::ApprovalEntry {
+ tranches: Vec::new(),
+ backing_group,
+ our_assignment: None,
+ assignments: bitvec::bitvec![BitOrderLsb0, u8; 0; n_validators],
+ approved: false,
+ }.into(),
+ );
+
+ db.block_entries.insert(block_hash, block_entry);
+}
+
+#[test]
+fn rejects_bad_assignment() {
+ let block_hash = Hash::repeat_byte(0x01);
+ let assignment_good = IndirectAssignmentCert {
+ block_hash,
+ validator: 0,
+ cert: garbage_assignment_cert(
+ AssignmentCertKind::RelayVRFModulo {
+ sample: 0,
+ },
+ ),
+ };
+ let mut state = some_state(Default::default());
+ let candidate_index = 0;
+
+ let res = check_and_import_assignment(
+ &mut state,
+ assignment_good.clone(),
+ candidate_index,
+ ).unwrap();
+ assert_eq!(res.0, AssignmentCheckResult::Accepted);
+ // Check that the assignment's been imported.
+ assert!(res.1.iter().any(|action| matches!(action, Action::WriteCandidateEntry(..))));
+
+ // unknown hash
+ let assignment = IndirectAssignmentCert {
+ block_hash: Hash::repeat_byte(0x02),
+ validator: 0,
+ cert: garbage_assignment_cert(
+ AssignmentCertKind::RelayVRFModulo {
+ sample: 0,
+ },
+ ),
+ };
+
+ let res = check_and_import_assignment(
+ &mut state,
+ assignment,
+ candidate_index,
+ ).unwrap();
+ assert_eq!(res.0, AssignmentCheckResult::Bad);
+
+ let mut state = State {
+ assignment_criteria: Box::new(MockAssignmentCriteria::check_only(|| {
+ Err(criteria::InvalidAssignment)
+ })),
+ ..some_state(Default::default())
+ };
+
+ // same assignment, but this time rejected
+ let res = check_and_import_assignment(
+ &mut state,
+ assignment_good,
+ candidate_index,
+ ).unwrap();
+ assert_eq!(res.0, AssignmentCheckResult::Bad);
+}
+
+#[test]
+fn rejects_assignment_in_future() {
+ let block_hash = Hash::repeat_byte(0x01);
+ let candidate_index = 0;
+ let assignment = IndirectAssignmentCert {
+ block_hash,
+ validator: 0,
+ cert: garbage_assignment_cert(
+ AssignmentCertKind::RelayVRFModulo {
+ sample: 0,
+ },
+ ),
+ };
+
+ let tick = 9;
+ let mut state = State {
+ assignment_criteria: Box::new(MockAssignmentCriteria::check_only(move || {
+ Ok((tick + 20) as _)
+ })),
+ ..some_state(StateConfig { tick, ..Default::default() })
+ };
+
+ let res = check_and_import_assignment(
+ &mut state,
+ assignment.clone(),
+ candidate_index,
+ ).unwrap();
+ assert_eq!(res.0, AssignmentCheckResult::TooFarInFuture);
+
+ let mut state = State {
+ assignment_criteria: Box::new(MockAssignmentCriteria::check_only(move || {
+ Ok((tick + 20 - 1) as _)
+ })),
+ ..some_state(StateConfig { tick, ..Default::default() })
+ };
+
+ let res = check_and_import_assignment(
+ &mut state,
+ assignment.clone(),
+ candidate_index,
+ ).unwrap();
+ assert_eq!(res.0, AssignmentCheckResult::Accepted);
+}
+
+#[test]
+fn rejects_assignment_with_unknown_candidate() {
+ let block_hash = Hash::repeat_byte(0x01);
+ let candidate_index = 1;
+ let assignment = IndirectAssignmentCert {
+ block_hash,
+ validator: 0,
+ cert: garbage_assignment_cert(
+ AssignmentCertKind::RelayVRFModulo {
+ sample: 0,
+ },
+ ),
+ };
+
+ let mut state = some_state(Default::default());
+
+ let res = check_and_import_assignment(
+ &mut state,
+ assignment.clone(),
+ candidate_index,
+ ).unwrap();
+ assert_eq!(res.0, AssignmentCheckResult::Bad);
+}
+
+#[test]
+fn assignment_import_updates_candidate_entry_and_schedules_wakeup() {
+ let block_hash = Hash::repeat_byte(0x01);
+ let candidate_hash = CandidateHash(Hash::repeat_byte(0xCC));
+
+ let candidate_index = 0;
+ let assignment = IndirectAssignmentCert {
+ block_hash,
+ validator: 0,
+ cert: garbage_assignment_cert(
+ AssignmentCertKind::RelayVRFModulo {
+ sample: 0,
+ },
+ ),
+ };
+
+ let mut state = State {
+ assignment_criteria: Box::new(MockAssignmentCriteria::check_only(|| {
+ Ok(0)
+ })),
+ ..some_state(Default::default())
+ };
+
+ let (res, actions) = check_and_import_assignment(
+ &mut state,
+ assignment.clone(),
+ candidate_index,
+ ).unwrap();
+
+ assert_eq!(res, AssignmentCheckResult::Accepted);
+ assert_eq!(actions.len(), 2);
+
+ assert_matches!(
+ actions.get(0).unwrap(),
+ Action::ScheduleWakeup {
+ block_hash: b,
+ candidate_hash: c,
+ tick,
+ } => {
+ assert_eq!(b, &block_hash);
+ assert_eq!(c, &candidate_hash);
+ assert_eq!(tick, &slot_to_tick(0 + 2)); // current tick + no-show-duration.
+ }
+ );
+
+ assert_matches!(
+ actions.get(1).unwrap(),
+ Action::WriteCandidateEntry(c, e) => {
+ assert_eq!(c, &candidate_hash);
+ assert!(e.approval_entry(&block_hash).unwrap().is_assigned(0));
+ }
+ );
+}
+
+#[test]
+fn rejects_approval_before_assignment() {
+ let block_hash = Hash::repeat_byte(0x01);
+ let candidate_hash = CandidateHash(Hash::repeat_byte(0xCC));
+
+ let state = State {
+ assignment_criteria: Box::new(MockAssignmentCriteria::check_only(|| {
+ Ok(0)
+ })),
+ ..some_state(Default::default())
+ };
+
+ let vote = IndirectSignedApprovalVote {
+ block_hash,
+ candidate_index: 0,
+ validator: 0,
+ signature: sign_approval(Sr25519Keyring::Alice, candidate_hash, 1),
+ };
+
+ let (actions, res) = check_and_import_approval(
+ &state,
+ vote,
+ |r| r
+ ).unwrap();
+
+ assert_eq!(res, ApprovalCheckResult::Bad);
+ assert!(actions.is_empty());
+}
+
+#[test]
+fn rejects_approval_if_no_candidate_entry() {
+ let block_hash = Hash::repeat_byte(0x01);
+ let candidate_hash = CandidateHash(Hash::repeat_byte(0xCC));
+
+ let mut state = State {
+ assignment_criteria: Box::new(MockAssignmentCriteria::check_only(|| {
+ Ok(0)
+ })),
+ ..some_state(Default::default())
+ };
+
+ let vote = IndirectSignedApprovalVote {
+ block_hash,
+ candidate_index: 0,
+ validator: 0,
+ signature: sign_approval(Sr25519Keyring::Alice, candidate_hash, 1),
+ };
+
+ state.db.candidate_entries.remove(&candidate_hash);
+
+ let (actions, res) = check_and_import_approval(
+ &state,
+ vote,
+ |r| r
+ ).unwrap();
+
+ assert_eq!(res, ApprovalCheckResult::Bad);
+ assert!(actions.is_empty());
+}
+
+#[test]
+fn rejects_approval_if_no_block_entry() {
+ let block_hash = Hash::repeat_byte(0x01);
+ let candidate_hash = CandidateHash(Hash::repeat_byte(0xCC));
+ let validator_index = 0;
+
+ let mut state = State {
+ assignment_criteria: Box::new(MockAssignmentCriteria::check_only(|| {
+ Ok(0)
+ })),
+ ..some_state(Default::default())
+ };
+
+ let vote = IndirectSignedApprovalVote {
+ block_hash,
+ candidate_index: 0,
+ validator: 0,
+ signature: sign_approval(Sr25519Keyring::Alice, candidate_hash, 1),
+ };
+
+ state.db.candidate_entries.get_mut(&candidate_hash).unwrap()
+ .approval_entry_mut(&block_hash)
+ .unwrap()
+ .import_assignment(0, validator_index, 0);
+
+ state.db.block_entries.remove(&block_hash);
+
+ let (actions, res) = check_and_import_approval(
+ &state,
+ vote,
+ |r| r
+ ).unwrap();
+
+ assert_eq!(res, ApprovalCheckResult::Bad);
+ assert!(actions.is_empty());
+}
+
+#[test]
+fn accepts_and_imports_approval_after_assignment() {
+ let block_hash = Hash::repeat_byte(0x01);
+ let candidate_hash = CandidateHash(Hash::repeat_byte(0xCC));
+ let validator_index = 0;
+
+ let candidate_index = 0;
+ let mut state = State {
+ assignment_criteria: Box::new(MockAssignmentCriteria::check_only(|| {
+ Ok(0)
+ })),
+ ..some_state(StateConfig {
+ validators: vec![Sr25519Keyring::Alice, Sr25519Keyring::Bob, Sr25519Keyring::Charlie],
+ validator_groups: vec![vec![0, 1], vec![2]],
+ needed_approvals: 2,
+ ..Default::default()
+ })
+ };
+
+ let vote = IndirectSignedApprovalVote {
+ block_hash,
+ candidate_index,
+ validator: validator_index,
+ signature: sign_approval(Sr25519Keyring::Alice, candidate_hash, 1),
+ };
+
+ state.db.candidate_entries.get_mut(&candidate_hash).unwrap()
+ .approval_entry_mut(&block_hash)
+ .unwrap()
+ .import_assignment(0, validator_index, 0);
+
+ let (actions, res) = check_and_import_approval(
+ &state,
+ vote,
+ |r| r
+ ).unwrap();
+
+ assert_eq!(res, ApprovalCheckResult::Accepted);
+
+ assert_eq!(actions.len(), 1);
+ assert_matches!(
+ actions.get(0).unwrap(),
+ Action::WriteCandidateEntry(c_hash, c_entry) => {
+ assert_eq!(c_hash, &candidate_hash);
+ assert!(c_entry.approvals().get(validator_index as usize).unwrap());
+ assert!(!c_entry.approval_entry(&block_hash).unwrap().is_approved());
+ }
+ );
+}
+
+#[test]
+fn second_approval_import_is_no_op() {
+ let block_hash = Hash::repeat_byte(0x01);
+ let candidate_hash = CandidateHash(Hash::repeat_byte(0xCC));
+ let validator_index = 0;
+
+ let candidate_index = 0;
+ let mut state = State {
+ assignment_criteria: Box::new(MockAssignmentCriteria::check_only(|| {
+ Ok(0)
+ })),
+ ..some_state(StateConfig {
+ validators: vec![Sr25519Keyring::Alice, Sr25519Keyring::Bob, Sr25519Keyring::Charlie],
+ validator_groups: vec![vec![0, 1], vec![2]],
+ needed_approvals: 2,
+ ..Default::default()
+ })
+ };
+
+ let vote = IndirectSignedApprovalVote {
+ block_hash,
+ candidate_index,
+ validator: validator_index,
+ signature: sign_approval(Sr25519Keyring::Alice, candidate_hash, 1),
+ };
+
+ state.db.candidate_entries.get_mut(&candidate_hash).unwrap()
+ .approval_entry_mut(&block_hash)
+ .unwrap()
+ .import_assignment(0, validator_index, 0);
+
+ assert!(!state.db.candidate_entries.get_mut(&candidate_hash).unwrap()
+ .mark_approval(validator_index));
+
+ let (actions, res) = check_and_import_approval(
+ &state,
+ vote,
+ |r| r
+ ).unwrap();
+
+ assert_eq!(res, ApprovalCheckResult::Accepted);
+ assert!(actions.is_empty())
+}
+
+#[test]
+fn check_and_apply_full_approval_sets_flag_and_bit() {
+ let block_hash = Hash::repeat_byte(0x01);
+ let candidate_hash = CandidateHash(Hash::repeat_byte(0xCC));
+ let validator_index_a = 0;
+ let validator_index_b = 1;
+
+ let mut state = State {
+ assignment_criteria: Box::new(MockAssignmentCriteria::check_only(|| {
+ Ok(0)
+ })),
+ ..some_state(StateConfig {
+ validators: vec![Sr25519Keyring::Alice, Sr25519Keyring::Bob, Sr25519Keyring::Charlie],
+ validator_groups: vec![vec![0, 1], vec![2]],
+ needed_approvals: 2,
+ ..Default::default()
+ })
+ };
+
+ state.db.candidate_entries.get_mut(&candidate_hash).unwrap()
+ .approval_entry_mut(&block_hash)
+ .unwrap()
+ .import_assignment(0, validator_index_a, 0);
+
+ state.db.candidate_entries.get_mut(&candidate_hash).unwrap()
+ .approval_entry_mut(&block_hash)
+ .unwrap()
+ .import_assignment(0, validator_index_b, 0);
+
+ assert!(!state.db.candidate_entries.get_mut(&candidate_hash).unwrap()
+ .mark_approval(validator_index_a));
+
+ assert!(!state.db.candidate_entries.get_mut(&candidate_hash).unwrap()
+ .mark_approval(validator_index_b));
+
+ let actions = check_and_apply_full_approval(
+ &state,
+ None,
+ candidate_hash,
+ state.db.candidate_entries.get(&candidate_hash).unwrap().clone(),
+ |b_hash, _a| b_hash == &block_hash,
+ ).unwrap();
+
+ assert_eq!(actions.len(), 2);
+ assert_matches!(
+ actions.get(0).unwrap(),
+ Action::WriteBlockEntry(b_entry) => {
+ assert_eq!(b_entry.block_hash(), block_hash);
+ assert!(b_entry.is_fully_approved());
+ assert!(b_entry.is_candidate_approved(&candidate_hash));
+ }
+ );
+ assert_matches!(
+ actions.get(1).unwrap(),
+ Action::WriteCandidateEntry(c_hash, c_entry) => {
+ assert_eq!(c_hash, &candidate_hash);
+ assert!(c_entry.approval_entry(&block_hash).unwrap().is_approved());
+ }
+ );
+}
+
+#[test]
+fn check_and_apply_full_approval_does_not_load_cached_block_from_db() {
+ let block_hash = Hash::repeat_byte(0x01);
+ let candidate_hash = CandidateHash(Hash::repeat_byte(0xCC));
+ let validator_index_a = 0;
+ let validator_index_b = 1;
+
+ let mut state = State {
+ assignment_criteria: Box::new(MockAssignmentCriteria::check_only(|| {
+ Ok(0)
+ })),
+ ..some_state(StateConfig {
+ validators: vec![Sr25519Keyring::Alice, Sr25519Keyring::Bob, Sr25519Keyring::Charlie],
+ validator_groups: vec![vec![0, 1], vec![2]],
+ needed_approvals: 2,
+ ..Default::default()
+ })
+ };
+
+ state.db.candidate_entries.get_mut(&candidate_hash).unwrap()
+ .approval_entry_mut(&block_hash)
+ .unwrap()
+ .import_assignment(0, validator_index_a, 0);
+
+ state.db.candidate_entries.get_mut(&candidate_hash).unwrap()
+ .approval_entry_mut(&block_hash)
+ .unwrap()
+ .import_assignment(0, validator_index_b, 0);
+
+ assert!(!state.db.candidate_entries.get_mut(&candidate_hash).unwrap()
+ .mark_approval(validator_index_a));
+
+ assert!(!state.db.candidate_entries.get_mut(&candidate_hash).unwrap()
+ .mark_approval(validator_index_b));
+
+ let block_entry = state.db.block_entries.remove(&block_hash).unwrap();
+
+ let actions = check_and_apply_full_approval(
+ &state,
+ Some((block_hash, block_entry)),
+ candidate_hash,
+ state.db.candidate_entries.get(&candidate_hash).unwrap().clone(),
+ |b_hash, _a| b_hash == &block_hash,
+ ).unwrap();
+
+ assert_eq!(actions.len(), 2);
+ assert_matches!(
+ actions.get(0).unwrap(),
+ Action::WriteBlockEntry(b_entry) => {
+ assert_eq!(b_entry.block_hash(), block_hash);
+ assert!(b_entry.is_fully_approved());
+ assert!(b_entry.is_candidate_approved(&candidate_hash));
+ }
+ );
+ assert_matches!(
+ actions.get(1).unwrap(),
+ Action::WriteCandidateEntry(c_hash, c_entry) => {
+ assert_eq!(c_hash, &candidate_hash);
+ assert!(c_entry.approval_entry(&block_hash).unwrap().is_approved());
+ }
+ );
+}
+
+#[test]
+fn assignment_triggered_by_all_with_less_than_supermajority() {
+ let block_hash = Hash::repeat_byte(0x01);
+
+ let mut candidate_entry: CandidateEntry = {
+ let approval_entry = approval_db::v1::ApprovalEntry {
+ tranches: Vec::new(),
+ backing_group: GroupIndex(0),
+ our_assignment: Some(approval_db::v1::OurAssignment {
+ cert: garbage_assignment_cert(
+ AssignmentCertKind::RelayVRFModulo { sample: 0 }
+ ),
+ tranche: 1,
+ validator_index: 4,
+ triggered: false,
+ }),
+ assignments: bitvec::bitvec![BitOrderLsb0, u8; 0; 4],
+ approved: false,
+ };
+
+ approval_db::v1::CandidateEntry {
+ candidate: Default::default(),
+ session: 1,
+ block_assignments: vec![(block_hash, approval_entry)].into_iter().collect(),
+ approvals: bitvec::bitvec![BitOrderLsb0, u8; 0; 4],
+ }.into()
+ };
+
+ // 2-of-4
+ candidate_entry
+ .approval_entry_mut(&block_hash)
+ .unwrap()
+ .import_assignment(0, 0, 0);
+
+ candidate_entry
+ .approval_entry_mut(&block_hash)
+ .unwrap()
+ .import_assignment(0, 1, 0);
+
+ candidate_entry.mark_approval(0);
+ candidate_entry.mark_approval(1);
+
+ let tranche_now = 1;
+ assert!(should_trigger_assignment(
+ candidate_entry.approval_entry(&block_hash).unwrap(),
+ &candidate_entry,
+ RequiredTranches::All,
+ tranche_now,
+ ));
+}
+
+#[test]
+fn assignment_not_triggered_by_all_with_supermajority() {
+ let block_hash = Hash::repeat_byte(0x01);
+
+ let mut candidate_entry: CandidateEntry = {
+ let approval_entry = approval_db::v1::ApprovalEntry {
+ tranches: Vec::new(),
+ backing_group: GroupIndex(0),
+ our_assignment: Some(approval_db::v1::OurAssignment {
+ cert: garbage_assignment_cert(
+ AssignmentCertKind::RelayVRFModulo { sample: 0 }
+ ),
+ tranche: 1,
+ validator_index: 4,
+ triggered: false,
+ }),
+ assignments: bitvec::bitvec![BitOrderLsb0, u8; 0; 4],
+ approved: false,
+ };
+
+ approval_db::v1::CandidateEntry {
+ candidate: Default::default(),
+ session: 1,
+ block_assignments: vec![(block_hash, approval_entry)].into_iter().collect(),
+ approvals: bitvec::bitvec![BitOrderLsb0, u8; 0; 4],
+ }.into()
+ };
+
+ // 3-of-4
+ candidate_entry
+ .approval_entry_mut(&block_hash)
+ .unwrap()
+ .import_assignment(0, 0, 0);
+
+ candidate_entry
+ .approval_entry_mut(&block_hash)
+ .unwrap()
+ .import_assignment(0, 1, 0);
+
+ candidate_entry
+ .approval_entry_mut(&block_hash)
+ .unwrap()
+ .import_assignment(0, 2, 0);
+
+ candidate_entry.mark_approval(0);
+ candidate_entry.mark_approval(1);
+ candidate_entry.mark_approval(2);
+
+ let tranche_now = 1;
+ assert!(!should_trigger_assignment(
+ candidate_entry.approval_entry(&block_hash).unwrap(),
+ &candidate_entry,
+ RequiredTranches::All,
+ tranche_now,
+ ));
+}
+
+#[test]
+fn assignment_not_triggered_if_already_triggered() {
+ let block_hash = Hash::repeat_byte(0x01);
+
+ let candidate_entry: CandidateEntry = {
+ let approval_entry = approval_db::v1::ApprovalEntry {
+ tranches: Vec::new(),
+ backing_group: GroupIndex(0),
+ our_assignment: Some(approval_db::v1::OurAssignment {
+ cert: garbage_assignment_cert(
+ AssignmentCertKind::RelayVRFModulo { sample: 0 }
+ ),
+ tranche: 1,
+ validator_index: 4,
+ triggered: true,
+ }),
+ assignments: bitvec::bitvec![BitOrderLsb0, u8; 0; 4],
+ approved: false,
+ };
+
+ approval_db::v1::CandidateEntry {
+ candidate: Default::default(),
+ session: 1,
+ block_assignments: vec![(block_hash, approval_entry)].into_iter().collect(),
+ approvals: bitvec::bitvec![BitOrderLsb0, u8; 0; 4],
+ }.into()
+ };
+
+ let tranche_now = 1;
+ assert!(!should_trigger_assignment(
+ candidate_entry.approval_entry(&block_hash).unwrap(),
+ &candidate_entry,
+ RequiredTranches::All,
+ tranche_now,
+ ));
+}
+
+#[test]
+fn assignment_not_triggered_by_exact() {
+ let block_hash = Hash::repeat_byte(0x01);
+
+ let candidate_entry: CandidateEntry = {
+ let approval_entry = approval_db::v1::ApprovalEntry {
+ tranches: Vec::new(),
+ backing_group: GroupIndex(0),
+ our_assignment: Some(approval_db::v1::OurAssignment {
+ cert: garbage_assignment_cert(
+ AssignmentCertKind::RelayVRFModulo { sample: 0 }
+ ),
+ tranche: 1,
+ validator_index: 4,
+ triggered: false,
+ }),
+ assignments: bitvec::bitvec![BitOrderLsb0, u8; 0; 4],
+ approved: false,
+ };
+
+ approval_db::v1::CandidateEntry {
+ candidate: Default::default(),
+ session: 1,
+ block_assignments: vec![(block_hash, approval_entry)].into_iter().collect(),
+ approvals: bitvec::bitvec![BitOrderLsb0, u8; 0; 4],
+ }.into()
+ };
+
+ let tranche_now = 1;
+ assert!(!should_trigger_assignment(
+ candidate_entry.approval_entry(&block_hash).unwrap(),
+ &candidate_entry,
+ RequiredTranches::Exact { needed: 2, next_no_show: None, tolerated_missing: 0 },
+ tranche_now,
+ ));
+}
+
+#[test]
+fn assignment_not_triggered_more_than_maximum() {
+ let block_hash = Hash::repeat_byte(0x01);
+ let maximum_broadcast = 10;
+
+ let candidate_entry: CandidateEntry = {
+ let approval_entry = approval_db::v1::ApprovalEntry {
+ tranches: Vec::new(),
+ backing_group: GroupIndex(0),
+ our_assignment: Some(approval_db::v1::OurAssignment {
+ cert: garbage_assignment_cert(
+ AssignmentCertKind::RelayVRFModulo { sample: 0 }
+ ),
+ tranche: maximum_broadcast + 1,
+ validator_index: 4,
+ triggered: false,
+ }),
+ assignments: bitvec::bitvec![BitOrderLsb0, u8; 0; 4],
+ approved: false,
+ };
+
+ approval_db::v1::CandidateEntry {
+ candidate: Default::default(),
+ session: 1,
+ block_assignments: vec![(block_hash, approval_entry)].into_iter().collect(),
+ approvals: bitvec::bitvec![BitOrderLsb0, u8; 0; 4],
+ }.into()
+ };
+
+ let tranche_now = 50;
+ assert!(!should_trigger_assignment(
+ candidate_entry.approval_entry(&block_hash).unwrap(),
+ &candidate_entry,
+ RequiredTranches::Pending {
+ maximum_broadcast,
+ clock_drift: 0,
+ considered: 10,
+ next_no_show: None,
+ },
+ tranche_now,
+ ));
+}
+
+#[test]
+fn assignment_triggered_if_at_maximum() {
+ let block_hash = Hash::repeat_byte(0x01);
+ let maximum_broadcast = 10;
+
+ let candidate_entry: CandidateEntry = {
+ let approval_entry = approval_db::v1::ApprovalEntry {
+ tranches: Vec::new(),
+ backing_group: GroupIndex(0),
+ our_assignment: Some(approval_db::v1::OurAssignment {
+ cert: garbage_assignment_cert(
+ AssignmentCertKind::RelayVRFModulo { sample: 0 }
+ ),
+ tranche: maximum_broadcast,
+ validator_index: 4,
+ triggered: false,
+ }),
+ assignments: bitvec::bitvec![BitOrderLsb0, u8; 0; 4],
+ approved: false,
+ };
+
+ approval_db::v1::CandidateEntry {
+ candidate: Default::default(),
+ session: 1,
+ block_assignments: vec![(block_hash, approval_entry)].into_iter().collect(),
+ approvals: bitvec::bitvec![BitOrderLsb0, u8; 0; 4],
+ }.into()
+ };
+
+ let tranche_now = maximum_broadcast;
+ assert!(should_trigger_assignment(
+ candidate_entry.approval_entry(&block_hash).unwrap(),
+ &candidate_entry,
+ RequiredTranches::Pending {
+ maximum_broadcast,
+ clock_drift: 0,
+ considered: 10,
+ next_no_show: None,
+ },
+ tranche_now,
+ ));
+}
+
+#[test]
+fn assignment_not_triggered_if_at_maximum_but_clock_is_before() {
+ let block_hash = Hash::repeat_byte(0x01);
+ let maximum_broadcast = 10;
+
+ let candidate_entry: CandidateEntry = {
+ let approval_entry = approval_db::v1::ApprovalEntry {
+ tranches: Vec::new(),
+ backing_group: GroupIndex(0),
+ our_assignment: Some(approval_db::v1::OurAssignment {
+ cert: garbage_assignment_cert(
+ AssignmentCertKind::RelayVRFModulo { sample: 0 }
+ ),
+ tranche: maximum_broadcast,
+ validator_index: 4,
+ triggered: false,
+ }),
+ assignments: bitvec::bitvec![BitOrderLsb0, u8; 0; 4],
+ approved: false,
+ };
+
+ approval_db::v1::CandidateEntry {
+ candidate: Default::default(),
+ session: 1,
+ block_assignments: vec![(block_hash, approval_entry)].into_iter().collect(),
+ approvals: bitvec::bitvec![BitOrderLsb0, u8; 0; 4],
+ }.into()
+ };
+
+ let tranche_now = 9;
+ assert!(!should_trigger_assignment(
+ candidate_entry.approval_entry(&block_hash).unwrap(),
+ &candidate_entry,
+ RequiredTranches::Pending {
+ maximum_broadcast,
+ clock_drift: 0,
+ considered: 10,
+ next_no_show: None,
+ },
+ tranche_now,
+ ));
+}
+
+#[test]
+fn assignment_not_triggered_if_at_maximum_but_clock_is_before_with_drift() {
+ let block_hash = Hash::repeat_byte(0x01);
+ let maximum_broadcast = 10;
+
+ let candidate_entry: CandidateEntry = {
+ let approval_entry = approval_db::v1::ApprovalEntry {
+ tranches: Vec::new(),
+ backing_group: GroupIndex(0),
+ our_assignment: Some(approval_db::v1::OurAssignment {
+ cert: garbage_assignment_cert(
+ AssignmentCertKind::RelayVRFModulo { sample: 0 }
+ ),
+ tranche: maximum_broadcast,
+ validator_index: 4,
+ triggered: false,
+ }),
+ assignments: bitvec::bitvec![BitOrderLsb0, u8; 0; 4],
+ approved: false,
+ };
+
+ approval_db::v1::CandidateEntry {
+ candidate: Default::default(),
+ session: 1,
+ block_assignments: vec![(block_hash, approval_entry)].into_iter().collect(),
+ approvals: bitvec::bitvec![BitOrderLsb0, u8; 0; 4],
+ }.into()
+ };
+
+ let tranche_now = 10;
+ assert!(!should_trigger_assignment(
+ candidate_entry.approval_entry(&block_hash).unwrap(),
+ &candidate_entry,
+ RequiredTranches::Pending {
+ maximum_broadcast,
+ clock_drift: 1,
+ considered: 10,
+ next_no_show: None,
+ },
+ tranche_now,
+ ));
+}
+
+#[test]
+fn wakeups_drain() {
+	// Wakeups are stored per (block, candidate) and drained in tick order.
+	// (`collect::<Vec<_>>()` turbofish restored - stripped by extraction.)
+	let mut wakeups = Wakeups::default();
+
+	let b_a = Hash::repeat_byte(0);
+	let b_b = Hash::repeat_byte(1);
+
+	let c_a = CandidateHash(Hash::repeat_byte(2));
+	let c_b = CandidateHash(Hash::repeat_byte(3));
+
+	wakeups.schedule(b_a, c_a, 1);
+	wakeups.schedule(b_a, c_b, 4);
+	wakeups.schedule(b_b, c_b, 3);
+
+	assert_eq!(wakeups.first().unwrap(), 1);
+
+	// Draining up to and including tick 3 yields the tick-1 and tick-3
+	// entries in order; the tick-4 entry remains.
+	assert_eq!(
+		wakeups.drain(..=3).collect::<Vec<_>>(),
+		vec![(b_a, c_a), (b_b, c_b)],
+	);
+
+	assert_eq!(wakeups.first().unwrap(), 4);
+}
+
+#[test]
+fn wakeup_earlier_supersedes_later() {
+	// Re-scheduling the same (block, candidate) keeps only the earliest tick.
+	// (`collect::<Vec<_>>()` turbofish restored - stripped by extraction.)
+	let mut wakeups = Wakeups::default();
+
+	let b_a = Hash::repeat_byte(0);
+	let c_a = CandidateHash(Hash::repeat_byte(2));
+
+	wakeups.schedule(b_a, c_a, 4);
+	wakeups.schedule(b_a, c_a, 2);
+	wakeups.schedule(b_a, c_a, 3);
+
+	assert_eq!(wakeups.first().unwrap(), 2);
+
+	assert_eq!(
+		wakeups.drain(..=2).collect::<Vec<_>>(),
+		vec![(b_a, c_a)],
+	);
+
+	// The later ticks (3, 4) were superseded, so nothing is left.
+	assert!(wakeups.first().is_none());
+}
+
+#[test]
+fn block_not_approved_until_all_candidates_approved() {
+ let block_hash = Hash::repeat_byte(0x01);
+ let candidate_hash = CandidateHash(Hash::repeat_byte(0xCC));
+ let candidate_hash_2 = CandidateHash(Hash::repeat_byte(0xDD));
+
+ let validator_index_a = 0;
+ let validator_index_b = 1;
+
+ let mut state = State {
+ assignment_criteria: Box::new(MockAssignmentCriteria::check_only(|| {
+ Ok(0)
+ })),
+ ..some_state(StateConfig {
+ validators: vec![Sr25519Keyring::Alice, Sr25519Keyring::Bob, Sr25519Keyring::Charlie],
+ validator_groups: vec![vec![0, 1], vec![2]],
+ needed_approvals: 2,
+ ..Default::default()
+ })
+ };
+
+ add_candidate_to_block(
+ &mut state.db,
+ block_hash,
+ candidate_hash_2,
+ 3,
+ CoreIndex(1),
+ GroupIndex(1),
+ );
+
+ let approve_candidate = |db: &mut TestStore, c_hash| {
+ db.candidate_entries.get_mut(&c_hash).unwrap()
+ .approval_entry_mut(&block_hash)
+ .unwrap()
+ .import_assignment(0, validator_index_a, 0);
+
+ db.candidate_entries.get_mut(&c_hash).unwrap()
+ .approval_entry_mut(&block_hash)
+ .unwrap()
+ .import_assignment(0, validator_index_b, 0);
+
+ assert!(!db.candidate_entries.get_mut(&c_hash).unwrap()
+ .mark_approval(validator_index_a));
+
+ assert!(!db.candidate_entries.get_mut(&c_hash).unwrap()
+ .mark_approval(validator_index_b));
+ };
+
+ approve_candidate(&mut state.db, candidate_hash_2);
+
+ {
+ let b = state.db.block_entries.get_mut(&block_hash).unwrap();
+ b.mark_approved_by_hash(&candidate_hash);
+ assert!(!b.is_fully_approved());
+ }
+
+ let actions = check_and_apply_full_approval(
+ &state,
+ None,
+ candidate_hash_2,
+ state.db.candidate_entries.get(&candidate_hash_2).unwrap().clone(),
+ |b_hash, _a| b_hash == &block_hash,
+ ).unwrap();
+
+ assert_eq!(actions.len(), 2);
+ assert_matches!(
+ actions.get(0).unwrap(),
+ Action::WriteBlockEntry(b_entry) => {
+ assert_eq!(b_entry.block_hash(), block_hash);
+ assert!(b_entry.is_fully_approved());
+ assert!(b_entry.is_candidate_approved(&candidate_hash_2));
+ }
+ );
+
+ assert_matches!(
+ actions.get(1).unwrap(),
+ Action::WriteCandidateEntry(c_h, c_entry) => {
+ assert_eq!(c_h, &candidate_hash_2);
+ assert!(c_entry.approval_entry(&block_hash).unwrap().is_approved());
+ }
+ );
+}
+
+#[test]
+fn candidate_approval_applied_to_all_blocks() {
+ let block_hash = Hash::repeat_byte(0x01);
+ let block_hash_2 = Hash::repeat_byte(0x02);
+ let candidate_hash = CandidateHash(Hash::repeat_byte(0xCC));
+ let validator_index_a = 0;
+ let validator_index_b = 1;
+
+ let slot = Slot::from(1);
+ let session_index = 1;
+
+ let mut state = State {
+ assignment_criteria: Box::new(MockAssignmentCriteria::check_only(|| {
+ Ok(0)
+ })),
+ ..some_state(StateConfig {
+ validators: vec![Sr25519Keyring::Alice, Sr25519Keyring::Bob, Sr25519Keyring::Charlie],
+ validator_groups: vec![vec![0, 1], vec![2]],
+ needed_approvals: 2,
+ session_index,
+ slot,
+ ..Default::default()
+ })
+ };
+
+ add_block(
+ &mut state.db,
+ block_hash_2,
+ session_index,
+ slot,
+ );
+
+ add_candidate_to_block(
+ &mut state.db,
+ block_hash_2,
+ candidate_hash,
+ 3,
+ CoreIndex(1),
+ GroupIndex(1),
+ );
+
+ state.db.candidate_entries.get_mut(&candidate_hash).unwrap()
+ .approval_entry_mut(&block_hash)
+ .unwrap()
+ .import_assignment(0, validator_index_a, 0);
+
+ state.db.candidate_entries.get_mut(&candidate_hash).unwrap()
+ .approval_entry_mut(&block_hash)
+ .unwrap()
+ .import_assignment(0, validator_index_b, 0);
+
+ state.db.candidate_entries.get_mut(&candidate_hash).unwrap()
+ .approval_entry_mut(&block_hash_2)
+ .unwrap()
+ .import_assignment(0, validator_index_a, 0);
+
+ state.db.candidate_entries.get_mut(&candidate_hash).unwrap()
+ .approval_entry_mut(&block_hash_2)
+ .unwrap()
+ .import_assignment(0, validator_index_b, 0);
+
+ assert!(!state.db.candidate_entries.get_mut(&candidate_hash).unwrap()
+ .mark_approval(validator_index_a));
+
+ assert!(!state.db.candidate_entries.get_mut(&candidate_hash).unwrap()
+ .mark_approval(validator_index_b));
+
+ let actions = check_and_apply_full_approval(
+ &state,
+ None,
+ candidate_hash,
+ state.db.candidate_entries.get(&candidate_hash).unwrap().clone(),
+ |_b_hash, _a| true,
+ ).unwrap();
+
+ assert_eq!(actions.len(), 3);
+ assert_matches!(
+ actions.get(0).unwrap(),
+ Action::WriteBlockEntry(b_entry) => {
+ assert_eq!(b_entry.block_hash(), block_hash);
+ assert!(b_entry.is_fully_approved());
+ assert!(b_entry.is_candidate_approved(&candidate_hash));
+ }
+ );
+ assert_matches!(
+ actions.get(1).unwrap(),
+ Action::WriteBlockEntry(b_entry) => {
+ assert_eq!(b_entry.block_hash(), block_hash_2);
+ assert!(b_entry.is_fully_approved());
+ assert!(b_entry.is_candidate_approved(&candidate_hash));
+ }
+ );
+ assert_matches!(
+ actions.get(2).unwrap(),
+ Action::WriteCandidateEntry(c_hash, c_entry) => {
+ assert_eq!(c_hash, &candidate_hash);
+ assert!(c_entry.approval_entry(&block_hash).unwrap().is_approved());
+ assert!(c_entry.approval_entry(&block_hash_2).unwrap().is_approved());
+ }
+ );
+}
+
+#[test]
+fn approved_ancestor_all_approved() {
+ let block_hash_1 = Hash::repeat_byte(0x01);
+ let block_hash_2 = Hash::repeat_byte(0x02);
+ let block_hash_3 = Hash::repeat_byte(0x03);
+ let block_hash_4 = Hash::repeat_byte(0x04);
+
+ let candidate_hash = CandidateHash(Hash::repeat_byte(0xCC));
+
+ let slot = Slot::from(1);
+ let session_index = 1;
+
+ let mut state = State {
+ assignment_criteria: Box::new(MockAssignmentCriteria::check_only(|| {
+ Ok(0)
+ })),
+ ..some_state(StateConfig {
+ validators: vec![Sr25519Keyring::Alice, Sr25519Keyring::Bob],
+ validator_groups: vec![vec![0], vec![1]],
+ needed_approvals: 2,
+ session_index,
+ slot,
+ ..Default::default()
+ })
+ };
+
+ let add_block = |db: &mut TestStore, block_hash, approved| {
+ add_block(
+ db,
+ block_hash,
+ session_index,
+ slot,
+ );
+
+ let b = db.block_entries.get_mut(&block_hash).unwrap();
+ b.add_candidate(CoreIndex(0), candidate_hash);
+ if approved {
+ b.mark_approved_by_hash(&candidate_hash);
+ }
+ };
+
+ add_block(&mut state.db, block_hash_1, true);
+ add_block(&mut state.db, block_hash_2, true);
+ add_block(&mut state.db, block_hash_3, true);
+ add_block(&mut state.db, block_hash_4, true);
+
+ let pool = TaskExecutor::new();
+ let (mut ctx, mut handle) = make_subsystem_context::<(), _>(pool.clone());
+
+ let test_fut = Box::pin(async move {
+ assert_eq!(
+ handle_approved_ancestor(&mut ctx, &state.db, block_hash_4, 0).await.unwrap(),
+ Some(block_hash_4),
+ )
+ });
+
+ let aux_fut = Box::pin(async move {
+ assert_matches!(
+ handle.recv().await,
+ AllMessages::ChainApi(ChainApiMessage::BlockNumber(target, tx)) => {
+ assert_eq!(target, block_hash_4);
+ let _ = tx.send(Ok(Some(4)));
+ }
+ );
+
+ assert_matches!(
+ handle.recv().await,
+ AllMessages::ChainApi(ChainApiMessage::Ancestors {
+ hash,
+ k,
+ response_channel: tx,
+ }) => {
+ assert_eq!(hash, block_hash_4);
+ assert_eq!(k, 4 - (0 + 1));
+ let _ = tx.send(Ok(vec![block_hash_3, block_hash_2, block_hash_1]));
+ }
+ );
+ });
+
+ futures::executor::block_on(futures::future::select(test_fut, aux_fut));
+}
+
+#[test]
+fn approved_ancestor_missing_approval() {
+ let block_hash_1 = Hash::repeat_byte(0x01);
+ let block_hash_2 = Hash::repeat_byte(0x02);
+ let block_hash_3 = Hash::repeat_byte(0x03);
+ let block_hash_4 = Hash::repeat_byte(0x04);
+
+ let candidate_hash = CandidateHash(Hash::repeat_byte(0xCC));
+
+ let slot = Slot::from(1);
+ let session_index = 1;
+
+ let mut state = State {
+ assignment_criteria: Box::new(MockAssignmentCriteria::check_only(|| {
+ Ok(0)
+ })),
+ ..some_state(StateConfig {
+ validators: vec![Sr25519Keyring::Alice, Sr25519Keyring::Bob],
+ validator_groups: vec![vec![0], vec![1]],
+ needed_approvals: 2,
+ session_index,
+ slot,
+ ..Default::default()
+ })
+ };
+
+ let add_block = |db: &mut TestStore, block_hash, approved| {
+ add_block(
+ db,
+ block_hash,
+ session_index,
+ slot,
+ );
+
+ let b = db.block_entries.get_mut(&block_hash).unwrap();
+ b.add_candidate(CoreIndex(0), candidate_hash);
+ if approved {
+ b.mark_approved_by_hash(&candidate_hash);
+ }
+ };
+
+ add_block(&mut state.db, block_hash_1, true);
+ add_block(&mut state.db, block_hash_2, true);
+ add_block(&mut state.db, block_hash_3, false);
+ add_block(&mut state.db, block_hash_4, true);
+
+ let pool = TaskExecutor::new();
+ let (mut ctx, mut handle) = make_subsystem_context::<(), _>(pool.clone());
+
+ let test_fut = Box::pin(async move {
+ assert_eq!(
+ handle_approved_ancestor(&mut ctx, &state.db, block_hash_4, 0).await.unwrap(),
+ Some(block_hash_2),
+ )
+ });
+
+ let aux_fut = Box::pin(async move {
+ assert_matches!(
+ handle.recv().await,
+ AllMessages::ChainApi(ChainApiMessage::BlockNumber(target, tx)) => {
+ assert_eq!(target, block_hash_4);
+ let _ = tx.send(Ok(Some(4)));
+ }
+ );
+
+ assert_matches!(
+ handle.recv().await,
+ AllMessages::ChainApi(ChainApiMessage::Ancestors {
+ hash,
+ k,
+ response_channel: tx,
+ }) => {
+ assert_eq!(hash, block_hash_4);
+ assert_eq!(k, 4 - (0 + 1));
+ let _ = tx.send(Ok(vec![block_hash_3, block_hash_2, block_hash_1]));
+ }
+ );
+ });
+
+ futures::executor::block_on(futures::future::select(test_fut, aux_fut));
+}
+
+#[test]
+fn process_wakeup_trigger_assignment_launch_approval() {
+ let block_hash = Hash::repeat_byte(0x01);
+ let candidate_hash = CandidateHash(Hash::repeat_byte(0xCC));
+ let slot = Slot::from(1);
+ let session_index = 1;
+
+ let mut state = State {
+ assignment_criteria: Box::new(MockAssignmentCriteria::check_only(|| {
+ Ok(0)
+ })),
+ ..some_state(StateConfig {
+ validators: vec![Sr25519Keyring::Alice, Sr25519Keyring::Bob],
+ validator_groups: vec![vec![0], vec![1]],
+ needed_approvals: 2,
+ session_index,
+ slot,
+ ..Default::default()
+ })
+ };
+
+ let actions = process_wakeup(
+ &state,
+ block_hash,
+ candidate_hash,
+ ).unwrap();
+
+ assert!(actions.is_empty());
+
+ state.db.candidate_entries
+ .get_mut(&candidate_hash)
+ .unwrap()
+ .approval_entry_mut(&block_hash)
+ .unwrap()
+ .set_our_assignment(approval_db::v1::OurAssignment {
+ cert: garbage_assignment_cert(
+ AssignmentCertKind::RelayVRFModulo { sample: 0 }
+ ),
+ tranche: 0,
+ validator_index: 0,
+ triggered: false,
+ }.into());
+
+ let actions = process_wakeup(
+ &state,
+ block_hash,
+ candidate_hash,
+ ).unwrap();
+
+ assert_eq!(actions.len(), 3);
+ assert_matches!(
+ actions.get(0).unwrap(),
+ Action::WriteCandidateEntry(c_hash, c_entry) => {
+ assert_eq!(c_hash, &candidate_hash);
+ assert!(c_entry
+ .approval_entry(&block_hash)
+ .unwrap()
+ .our_assignment()
+ .unwrap()
+ .triggered()
+ );
+ }
+ );
+
+ assert_matches!(
+ actions.get(1).unwrap(),
+ Action::LaunchApproval {
+ candidate_index,
+ ..
+ } => {
+ assert_eq!(candidate_index, &0);
+ }
+ );
+
+ assert_matches!(
+ actions.get(2).unwrap(),
+ Action::ScheduleWakeup {
+ tick,
+ ..
+ } => {
+ assert_eq!(tick, &slot_to_tick(0 + 2));
+ }
+ )
+}
+
+#[test]
+fn process_wakeup_schedules_wakeup() {
+ let block_hash = Hash::repeat_byte(0x01);
+ let candidate_hash = CandidateHash(Hash::repeat_byte(0xCC));
+ let slot = Slot::from(1);
+ let session_index = 1;
+
+ let mut state = State {
+ assignment_criteria: Box::new(MockAssignmentCriteria::check_only(|| {
+ Ok(10)
+ })),
+ ..some_state(StateConfig {
+ validators: vec![Sr25519Keyring::Alice, Sr25519Keyring::Bob],
+ validator_groups: vec![vec![0], vec![1]],
+ needed_approvals: 2,
+ session_index,
+ slot,
+ ..Default::default()
+ })
+ };
+
+ state.db.candidate_entries
+ .get_mut(&candidate_hash)
+ .unwrap()
+ .approval_entry_mut(&block_hash)
+ .unwrap()
+ .set_our_assignment(approval_db::v1::OurAssignment {
+ cert: garbage_assignment_cert(
+ AssignmentCertKind::RelayVRFModulo { sample: 0 }
+ ),
+ tranche: 10,
+ validator_index: 0,
+ triggered: false,
+ }.into());
+
+ let actions = process_wakeup(
+ &state,
+ block_hash,
+ candidate_hash,
+ ).unwrap();
+
+ assert_eq!(actions.len(), 1);
+ assert_matches!(
+ actions.get(0).unwrap(),
+ Action::ScheduleWakeup { block_hash: b, candidate_hash: c, tick } => {
+ assert_eq!(b, &block_hash);
+ assert_eq!(c, &candidate_hash);
+ assert_eq!(tick, &(slot_to_tick(slot) + 10));
+ }
+ );
+}
+
+#[test]
+fn triggered_assignment_leads_to_recovery_and_validation() {
+
+}
+
+#[test]
+fn finalization_event_prunes() {
+
+}
diff --git a/node/core/approval-voting/src/time.rs b/node/core/approval-voting/src/time.rs
new file mode 100644
index 000000000000..4ca85fa44dae
--- /dev/null
+++ b/node/core/approval-voting/src/time.rs
@@ -0,0 +1,88 @@
+// Copyright 2021 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! Time utilities for approval voting.
+
+use polkadot_node_primitives::approval::DelayTranche;
+use sp_consensus_slots::Slot;
+use futures::prelude::*;
+use std::time::{Duration, SystemTime};
+use std::pin::Pin;
+
+const TICK_DURATION_MILLIS: u64 = 500;
+
+/// A base unit of time, starting from the unix epoch, split into half-second intervals.
+pub(crate) type Tick = u64;
+
+/// A clock which allows querying of the current tick as well as
+/// waiting for a tick to be reached.
+pub(crate) trait Clock {
+ /// Yields the current tick.
+ fn tick_now(&self) -> Tick;
+
+ /// Yields a future which concludes when the given tick is reached.
+ fn wait(&self, tick: Tick) -> Pin<Box<dyn Future<Output = ()> + Send + 'static>>;
+}
+
+/// Extension methods for clocks.
+pub(crate) trait ClockExt {
+ fn tranche_now(&self, slot_duration_millis: u64, base_slot: Slot) -> DelayTranche;
+}
+
+impl<C: Clock> ClockExt for C {
+ fn tranche_now(&self, slot_duration_millis: u64, base_slot: Slot) -> DelayTranche {
+ self.tick_now()
+ .saturating_sub(slot_number_to_tick(slot_duration_millis, base_slot)) as u32
+ }
+}
+
+/// A clock which uses the actual underlying system clock.
+pub(crate) struct SystemClock;
+
+impl Clock for SystemClock {
+ /// Yields the current tick.
+ fn tick_now(&self) -> Tick {
+ match SystemTime::now().duration_since(SystemTime::UNIX_EPOCH) {
+ Err(_) => 0,
+ Ok(d) => d.as_millis() as u64 / TICK_DURATION_MILLIS,
+ }
+ }
+
+ /// Yields a future which concludes when the given tick is reached.
+ fn wait(&self, tick: Tick) -> Pin<Box<dyn Future<Output = ()> + Send>> {
+ let fut = async move {
+ let now = SystemTime::now();
+ let tick_onset = tick_to_time(tick);
+ if now < tick_onset {
+ if let Some(until) = tick_onset.duration_since(now).ok() {
+ futures_timer::Delay::new(until).await;
+ }
+ }
+ };
+
+ Box::pin(fut)
+ }
+}
+
+fn tick_to_time(tick: Tick) -> SystemTime {
+ SystemTime::UNIX_EPOCH + Duration::from_millis(TICK_DURATION_MILLIS * tick)
+}
+
+/// assumes `slot_duration_millis` evenly divided by tick duration.
+pub(crate) fn slot_number_to_tick(slot_duration_millis: u64, slot: Slot) -> Tick {
+ let ticks_per_slot = slot_duration_millis / TICK_DURATION_MILLIS;
+ u64::from(slot) * ticks_per_slot
+}
diff --git a/node/core/av-store/src/lib.rs b/node/core/av-store/src/lib.rs
index 0b4806b3157b..ded52bb9448a 100644
--- a/node/core/av-store/src/lib.rs
+++ b/node/core/av-store/src/lib.rs
@@ -641,7 +641,7 @@ async fn process_block_activated(
for event in candidate_events {
match event {
- CandidateEvent::CandidateBacked(receipt, _head) => {
+ CandidateEvent::CandidateBacked(receipt, _head, _core_index, _group_index) => {
note_block_backed(
&subsystem.db,
&mut tx,
@@ -651,7 +651,7 @@ async fn process_block_activated(
receipt,
)?;
}
- CandidateEvent::CandidateIncluded(receipt, _head) => {
+ CandidateEvent::CandidateIncluded(receipt, _head, _core_index, _group_index) => {
note_block_included(
&subsystem.db,
&mut tx,
diff --git a/node/core/av-store/src/tests.rs b/node/core/av-store/src/tests.rs
index f7b3475ef9c8..f97da3ff5b99 100644
--- a/node/core/av-store/src/tests.rs
+++ b/node/core/av-store/src/tests.rs
@@ -27,6 +27,7 @@ use futures::{
use polkadot_primitives::v1::{
AvailableData, BlockData, CandidateDescriptor, CandidateReceipt, HeadData,
PersistedValidationData, PoV, Id as ParaId, CandidateHash, Header, ValidatorId,
+ CoreIndex, GroupIndex,
};
use polkadot_node_subsystem_util::TimeoutExt;
use polkadot_subsystem::{
@@ -219,6 +220,15 @@ fn with_tx(db: &Arc, f: impl FnOnce(&mut DBTransaction)) {
db.write(tx).unwrap();
}
+fn candidate_included(receipt: CandidateReceipt) -> CandidateEvent {
+ CandidateEvent::CandidateIncluded(
+ receipt,
+ HeadData::default(),
+ CoreIndex::default(),
+ GroupIndex::default(),
+ )
+}
+
#[test]
fn runtime_api_error_does_not_stop_the_subsystem() {
let store = Arc::new(kvdb_memorydb::create(columns::NUM_COLUMNS));
@@ -595,7 +605,7 @@ fn stored_data_kept_until_finalized() {
&mut virtual_overseer,
parent,
block_number,
- vec![CandidateEvent::CandidateIncluded(candidate, HeadData::default())],
+ vec![candidate_included(candidate)],
(0..n_validators).map(|_| Sr25519Keyring::Alice.public().into()).collect(),
).await;
@@ -737,7 +747,7 @@ fn forkfullness_works() {
&mut virtual_overseer,
parent_1,
block_number_1,
- vec![CandidateEvent::CandidateIncluded(candidate_1, HeadData::default())],
+ vec![candidate_included(candidate_1)],
validators.clone(),
).await;
@@ -745,7 +755,7 @@ fn forkfullness_works() {
&mut virtual_overseer,
parent_2,
block_number_2,
- vec![CandidateEvent::CandidateIncluded(candidate_2, HeadData::default())],
+ vec![candidate_included(candidate_2)],
validators.clone(),
).await;
diff --git a/node/core/runtime-api/Cargo.toml b/node/core/runtime-api/Cargo.toml
index f0302c86cf11..e564ac56050c 100644
--- a/node/core/runtime-api/Cargo.toml
+++ b/node/core/runtime-api/Cargo.toml
@@ -13,6 +13,7 @@ parity-util-mem = { version = "0.9.0", default-features = false }
sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-consensus-babe = { git = "https://github.com/paritytech/substrate", branch = "master" }
polkadot-primitives = { path = "../../../primitives" }
polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" }
@@ -22,3 +23,4 @@ polkadot-node-subsystem-util = { path = "../../subsystem-util" }
sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
futures = { version = "0.3.12", features = ["thread-pool"] }
polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" }
+polkadot-node-primitives = { path = "../../primitives" }
diff --git a/node/core/runtime-api/src/cache.rs b/node/core/runtime-api/src/cache.rs
index ac9e0e844ebe..1dce87e68c22 100644
--- a/node/core/runtime-api/src/cache.rs
+++ b/node/core/runtime-api/src/cache.rs
@@ -20,6 +20,7 @@ use polkadot_primitives::v1::{
PersistedValidationData, Id as ParaId, OccupiedCoreAssumption,
SessionIndex, SessionInfo, ValidationCode, ValidatorId, ValidatorIndex,
};
+use sp_consensus_babe::Epoch;
use parity_util_mem::{MallocSizeOf, MallocSizeOfExt};
@@ -40,6 +41,7 @@ const CANDIDATE_EVENTS_CACHE_SIZE: usize = 64 * 1024;
const SESSION_INFO_CACHE_SIZE: usize = 64 * 1024;
const DMQ_CONTENTS_CACHE_SIZE: usize = 64 * 1024;
const INBOUND_HRMP_CHANNELS_CACHE_SIZE: usize = 64 * 1024;
+const CURRENT_BABE_EPOCH_CACHE_SIZE: usize = 64 * 1024;
struct ResidentSizeOf<T>(T);
@@ -49,6 +51,14 @@ impl<T: MallocSizeOf> ResidentSize for ResidentSizeOf<T> {
}
}
+struct DoesNotAllocate<T>(T);
+
+impl<T> ResidentSize for DoesNotAllocate<T> {
+ fn resident_size(&self) -> usize {
+ std::mem::size_of::<T>()
+ }
+}
+
pub(crate) struct RequestResultCache {
validators: MemoryLruCache<Hash, ResidentSizeOf<Vec<ValidatorId>>>,
validator_groups: MemoryLruCache<Hash, ResidentSizeOf<(Vec<Vec<ValidatorIndex>>, GroupRotationInfo)>>,
@@ -63,6 +73,7 @@ pub(crate) struct RequestResultCache {
session_info: MemoryLruCache<(Hash, SessionIndex), ResidentSizeOf<SessionInfo>>,
dmq_contents: MemoryLruCache<(Hash, ParaId), ResidentSizeOf<Vec<InboundDownwardMessage<BlockNumber>>>>,
inbound_hrmp_channels_contents: MemoryLruCache<(Hash, ParaId), ResidentSizeOf<BTreeMap<ParaId, Vec<InboundHrmpMessage<BlockNumber>>>>>,
+ current_babe_epoch: MemoryLruCache<Hash, DoesNotAllocate<Epoch>>,
}
impl Default for RequestResultCache {
@@ -81,6 +92,7 @@ impl Default for RequestResultCache {
session_info: MemoryLruCache::new(SESSION_INFO_CACHE_SIZE),
dmq_contents: MemoryLruCache::new(DMQ_CONTENTS_CACHE_SIZE),
inbound_hrmp_channels_contents: MemoryLruCache::new(INBOUND_HRMP_CHANNELS_CACHE_SIZE),
+ current_babe_epoch: MemoryLruCache::new(CURRENT_BABE_EPOCH_CACHE_SIZE),
}
}
}
@@ -189,6 +201,14 @@ impl RequestResultCache {
pub(crate) fn cache_inbound_hrmp_channel_contents(&mut self, key: (Hash, ParaId), value: BTreeMap<ParaId, Vec<InboundHrmpMessage<BlockNumber>>>) {
self.inbound_hrmp_channels_contents.insert(key, ResidentSizeOf(value));
}
+
+ pub(crate) fn current_babe_epoch(&mut self, relay_parent: &Hash) -> Option<&Epoch> {
+ self.current_babe_epoch.get(relay_parent).map(|v| &v.0)
+ }
+
+ pub(crate) fn cache_current_babe_epoch(&mut self, relay_parent: Hash, epoch: Epoch) {
+ self.current_babe_epoch.insert(relay_parent, DoesNotAllocate(epoch));
+ }
}
pub(crate) enum RequestResult {
@@ -205,4 +225,5 @@ pub(crate) enum RequestResult {
SessionInfo(Hash, SessionIndex, Option<SessionInfo>),
DmqContents(Hash, ParaId, Vec<InboundDownwardMessage<BlockNumber>>),
InboundHrmpChannelsContents(Hash, ParaId, BTreeMap<ParaId, Vec<InboundHrmpMessage<BlockNumber>>>),
+ CurrentBabeEpoch(Hash, Epoch),
}
diff --git a/node/core/runtime-api/src/lib.rs b/node/core/runtime-api/src/lib.rs
index 294b1be9197f..1da4c1be57c0 100644
--- a/node/core/runtime-api/src/lib.rs
+++ b/node/core/runtime-api/src/lib.rs
@@ -35,6 +35,7 @@ use polkadot_primitives::v1::{Block, BlockId, Hash, ParachainHost};
use sp_api::ProvideRuntimeApi;
use sp_core::traits::SpawnNamed;
+use sp_consensus_babe::BabeApi;
use futures::{prelude::*, stream::FuturesUnordered, channel::oneshot, select};
use std::{sync::Arc, collections::VecDeque, pin::Pin};
@@ -82,7 +83,7 @@ impl RuntimeApiSubsystem {
impl<Client, Context> Subsystem<Context> for RuntimeApiSubsystem<Client> where
Client: ProvideRuntimeApi<Block> + Send + 'static + Sync,
- Client::Api: ParachainHost<Block>,
+ Client::Api: ParachainHost<Block> + BabeApi<Block>,
Context: SubsystemContext<Message = RuntimeApiMessage>
{
fn start(self, ctx: Context) -> SpawnedSubsystem {
@@ -95,7 +96,7 @@ impl Subsystem for RuntimeApiSubsystem where
impl<Client> RuntimeApiSubsystem<Client> where
Client: ProvideRuntimeApi<Block> + Send + 'static + Sync,
- Client::Api: ParachainHost<Block>,
+ Client::Api: ParachainHost<Block> + BabeApi<Block>,
{
fn store_cache(&mut self, result: RequestResult) {
use RequestResult::*;
@@ -127,6 +128,8 @@ impl RuntimeApiSubsystem where
self.requests_cache.cache_dmq_contents((relay_parent, para_id), messages),
InboundHrmpChannelsContents(relay_parent, para_id, contents) =>
self.requests_cache.cache_inbound_hrmp_channel_contents((relay_parent, para_id), contents),
+ CurrentBabeEpoch(relay_parent, epoch) =>
+ self.requests_cache.cache_current_babe_epoch(relay_parent, epoch),
}
}
@@ -189,7 +192,10 @@ impl RuntimeApiSubsystem where
.map(|sender| Request::DmqContents(id, sender)),
Request::InboundHrmpChannelsContents(id, sender) =>
query!(inbound_hrmp_channels_contents(id), sender)
- .map(|sender| Request::InboundHrmpChannelsContents(id, sender))
+ .map(|sender| Request::InboundHrmpChannelsContents(id, sender)),
+ Request::CurrentBabeEpoch(sender) =>
+ query!(current_babe_epoch(), sender)
+ .map(|sender| Request::CurrentBabeEpoch(sender)),
}
}
@@ -257,7 +263,7 @@ async fn run(
mut subsystem: RuntimeApiSubsystem<Client>,
) -> SubsystemResult<()> where
Client: ProvideRuntimeApi<Block> + Send + Sync + 'static,
- Client::Api: ParachainHost<Block>,
+ Client::Api: ParachainHost<Block> + BabeApi<Block>,
{
loop {
select! {
@@ -285,7 +291,7 @@ fn make_runtime_api_request(
) -> Option<RequestResult>
where
Client: ProvideRuntimeApi<Block>,
- Client::Api: ParachainHost<Block>,
+ Client::Api: ParachainHost<Block> + BabeApi<Block>,
{
let _timer = metrics.time_make_runtime_api_request();
@@ -339,6 +345,7 @@ where
Request::SessionInfo(index, sender) => query!(SessionInfo, session_info(index), sender),
Request::DmqContents(id, sender) => query!(DmqContents, dmq_contents(id), sender),
Request::InboundHrmpChannelsContents(id, sender) => query!(InboundHrmpChannelsContents, inbound_hrmp_channels_contents(id), sender),
+ Request::CurrentBabeEpoch(sender) => query!(CurrentBabeEpoch, current_epoch(), sender),
}
}
@@ -415,6 +422,7 @@ mod tests {
use sp_core::testing::TaskExecutor;
use std::{collections::{HashMap, BTreeMap}, sync::{Arc, Mutex}};
use futures::channel::oneshot;
+ use polkadot_node_primitives::BabeEpoch;
#[derive(Default, Clone)]
struct MockRuntimeApi {
@@ -432,6 +440,7 @@ mod tests {
candidate_events: Vec<CandidateEvent>,
dmq_contents: HashMap<ParaId, Vec<InboundDownwardMessage<BlockNumber>>>,
hrmp_channels: HashMap<ParaId, BTreeMap<ParaId, Vec<InboundHrmpMessage<BlockNumber>>>>,
+ babe_epoch: Option<BabeEpoch>,
}
impl ProvideRuntimeApi<Block> for MockRuntimeApi {
@@ -541,6 +550,38 @@ mod tests {
self.hrmp_channels.get(&recipient).map(|q| q.clone()).unwrap_or_default()
}
}
+
+ impl BabeApi<Block> for MockRuntimeApi {
+ fn configuration(&self) -> sp_consensus_babe::BabeGenesisConfiguration {
+ unimplemented!()
+ }
+
+ fn current_epoch_start(&self) -> sp_consensus_babe::Slot {
+ self.babe_epoch.as_ref().unwrap().start_slot
+ }
+
+ fn current_epoch(&self) -> BabeEpoch {
+ self.babe_epoch.as_ref().unwrap().clone()
+ }
+
+ fn next_epoch(&self) -> BabeEpoch {
+ unimplemented!()
+ }
+
+ fn generate_key_ownership_proof(
+ _slot: sp_consensus_babe::Slot,
+ _authority_id: sp_consensus_babe::AuthorityId,
+ ) -> Option<sp_consensus_babe::OpaqueKeyOwnershipProof> {
+ None
+ }
+
+ fn submit_report_equivocation_unsigned_extrinsic(
+ _equivocation_proof: sp_consensus_babe::EquivocationProof,
+ _key_owner_proof: sp_consensus_babe::OpaqueKeyOwnershipProof,
+ ) -> Option<()> {
+ None
+ }
+ }
}
#[test]
@@ -1108,4 +1149,36 @@ mod tests {
futures::executor::block_on(future::join(subsystem_task, test_task));
}
+
+ #[test]
+ fn request_babe_epoch() {
+ let (ctx, mut ctx_handle) = test_helpers::make_subsystem_context(TaskExecutor::new());
+ let mut runtime_api = MockRuntimeApi::default();
+ let epoch = BabeEpoch {
+ epoch_index: 100,
+ start_slot: sp_consensus_babe::Slot::from(1000),
+ duration: 10,
+ authorities: Vec::new(),
+ randomness: [1u8; 32],
+ };
+ runtime_api.babe_epoch = Some(epoch.clone());
+ let runtime_api = Arc::new(runtime_api);
+ let relay_parent = [1; 32].into();
+ let spawner = sp_core::testing::TaskExecutor::new();
+
+ let subsystem = RuntimeApiSubsystem::new(runtime_api.clone(), Metrics(None), spawner);
+ let subsystem_task = run(ctx, subsystem).map(|x| x.unwrap());
+ let test_task = async move {
+ let (tx, rx) = oneshot::channel();
+
+ ctx_handle.send(FromOverseer::Communication {
+ msg: RuntimeApiMessage::Request(relay_parent, Request::CurrentBabeEpoch(tx))
+ }).await;
+
+ assert_eq!(rx.await.unwrap().unwrap(), epoch);
+ ctx_handle.send(FromOverseer::Signal(OverseerSignal::Conclude)).await;
+ };
+
+ futures::executor::block_on(future::join(subsystem_task, test_task));
+ }
}
diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs
index 2ecb41eea297..173ef1e0213b 100644
--- a/node/network/approval-distribution/src/lib.rs
+++ b/node/network/approval-distribution/src/lib.rs
@@ -350,6 +350,7 @@ impl State {
ctx.send_message(AllMessages::ApprovalVoting(ApprovalVotingMessage::CheckAndImportAssignment(
assignment.clone(),
+ claimed_candidate_index,
tx,
))).await;
diff --git a/node/network/approval-distribution/src/tests.rs b/node/network/approval-distribution/src/tests.rs
index 263b1c3075ad..5e0753749e2a 100644
--- a/node/network/approval-distribution/src/tests.rs
+++ b/node/network/approval-distribution/src/tests.rs
@@ -222,6 +222,7 @@ fn try_import_the_same_assignment() {
overseer_recv(overseer).await,
AllMessages::ApprovalVoting(ApprovalVotingMessage::CheckAndImportAssignment(
assignment,
+ 0u32,
tx,
)) => {
assert_eq!(assignment, cert);
@@ -313,9 +314,11 @@ fn spam_attack_results_in_negative_reputation_change() {
overseer_recv(overseer).await,
AllMessages::ApprovalVoting(ApprovalVotingMessage::CheckAndImportAssignment(
assignment,
+ claimed_candidate_index,
tx,
)) => {
assert_eq!(assignment, assignments[i].0);
+ assert_eq!(claimed_candidate_index, assignments[i].1);
tx.send(AssignmentCheckResult::Accepted).unwrap();
}
);
@@ -477,9 +480,11 @@ fn import_approval_bad() {
overseer_recv(overseer).await,
AllMessages::ApprovalVoting(ApprovalVotingMessage::CheckAndImportAssignment(
assignment,
+ i,
tx,
)) => {
assert_eq!(assignment, cert);
+ assert_eq!(i, candidate_index);
tx.send(AssignmentCheckResult::Accepted).unwrap();
}
);
@@ -760,9 +765,11 @@ fn import_remotely_then_locally() {
overseer_recv(overseer).await,
AllMessages::ApprovalVoting(ApprovalVotingMessage::CheckAndImportAssignment(
assignment,
+ i,
tx,
)) => {
assert_eq!(assignment, cert);
+ assert_eq!(i, candidate_index);
tx.send(AssignmentCheckResult::Accepted).unwrap();
}
);
diff --git a/node/overseer/src/lib.rs b/node/overseer/src/lib.rs
index 72e352ed0127..8eed3bfc5e5f 100644
--- a/node/overseer/src/lib.rs
+++ b/node/overseer/src/lib.rs
@@ -1840,7 +1840,7 @@ where
let _ = self.approval_distribution_subsystem.send_message(msg).await;
},
AllMessages::ApprovalVoting(_msg) => {
- // FIXME: https://github.com/paritytech/polkadot/issues/1975
+ // FIXME: https://github.com/paritytech/polkadot/issues/2321
},
}
diff --git a/node/primitives/Cargo.toml b/node/primitives/Cargo.toml
index 58777d95526a..6259d114269c 100644
--- a/node/primitives/Cargo.toml
+++ b/node/primitives/Cargo.toml
@@ -12,5 +12,8 @@ polkadot-statement-table = { path = "../../statement-table" }
parity-scale-codec = { version = "2.0.0", default-features = false, features = ["derive"] }
runtime_primitives = { package = "sp-runtime", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-consensus-vrf = { git = "https://github.com/paritytech/substrate", branch = "master" }
-sp-consensus-slots = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-consensus-babe = { git = "https://github.com/paritytech/substrate", branch = "master" }
+schnorrkel = "0.9.1"
+thiserror = "1.0.22"
diff --git a/node/primitives/src/approval.rs b/node/primitives/src/approval.rs
index 43188550fdb2..8303478aa53c 100644
--- a/node/primitives/src/approval.rs
+++ b/node/primitives/src/approval.rs
@@ -16,29 +16,44 @@
//! Types relevant for approval.
-pub use sp_consensus_vrf::schnorrkel::{VRFOutput, VRFProof};
-pub use sp_consensus_slots::Slot;
+pub use sp_consensus_vrf::schnorrkel::{VRFOutput, VRFProof, Randomness};
+pub use sp_consensus_babe::Slot;
use polkadot_primitives::v1::{
- CandidateHash, Hash, ValidatorIndex, Signed, ValidatorSignature, CoreIndex,
- BlockNumber, CandidateIndex,
+ CandidateHash, Hash, ValidatorIndex, ValidatorSignature, CoreIndex,
+ Header, BlockNumber, CandidateIndex,
};
use parity_scale_codec::{Encode, Decode};
+use sp_consensus_babe as babe_primitives;
+use sp_application_crypto::Public;
/// Validators assigning to check a particular candidate are split up into tranches.
/// Earlier tranches of validators check first, with later tranches serving as backup.
pub type DelayTranche = u32;
+/// A static context used to compute the Relay VRF story based on the
+/// VRF output included in the header-chain.
+pub const RELAY_VRF_STORY_CONTEXT: &[u8] = b"A&V RC-VRF";
+
/// A static context used for all relay-vrf-modulo VRFs.
pub const RELAY_VRF_MODULO_CONTEXT: &[u8] = b"A&V MOD";
-/// A static context used for all relay-vrf-delay VRFs.
-pub const RELAY_VRF_DELAY_CONTEXT: &[u8] = b"A&V TRANCHE";
+/// A static context used for all relay-vrf-modulo VRFs.
+pub const RELAY_VRF_DELAY_CONTEXT: &[u8] = b"A&V DELAY";
+
+/// A static context used for transcripts indicating assigned availability core.
+pub const ASSIGNED_CORE_CONTEXT: &[u8] = b"A&V ASSIGNED";
+
+/// A static context associated with producing randomness for a core.
+pub const CORE_RANDOMNESS_CONTEXT: &[u8] = b"A&V CORE";
+
+/// A static context associated with producing randomness for a tranche.
+pub const TRANCHE_RANDOMNESS_CONTEXT: &[u8] = b"A&V TRANCHE";
/// random bytes derived from the VRF submitted within the block by the
/// block author as a credential and used as input to approval assignment criteria.
#[derive(Debug, Clone, Encode, Decode, PartialEq)]
-pub struct RelayVRF(pub [u8; 32]);
+pub struct RelayVRFStory(pub [u8; 32]);
/// Different kinds of input data or criteria that can prove a validator's assignment
/// to check a particular parachain.
@@ -87,9 +102,6 @@ pub struct IndirectAssignmentCert {
#[derive(Debug, Clone, Encode, Decode)]
pub struct ApprovalVote(pub CandidateHash);
-/// An approval vote signed by some validator.
-pub type SignedApprovalVote = Signed