From 8362d1e1ed5c59f094b555bc4d631317fd4d12c7 Mon Sep 17 00:00:00 2001 From: alindima Date: Wed, 26 Jun 2024 15:49:22 +0300 Subject: [PATCH 01/56] first prototype --- .../src/fragment_chain/mod.rs | 425 +++++++----------- .../core/prospective-parachains/src/lib.rs | 122 +++-- 2 files changed, 222 insertions(+), 325 deletions(-) diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs index f87d4820ff9a..22e2ccbbf3c4 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs @@ -87,10 +87,7 @@ mod tests; use std::{ - collections::{ - hash_map::{Entry, HashMap}, - BTreeMap, HashSet, - }, + collections::{BTreeMap, HashMap, HashSet}, sync::Arc, }; @@ -104,6 +101,8 @@ use polkadot_primitives::{ PersistedValidationData, }; +const EXTRA_UNCONNECTED_COUNT: usize = 2; + /// Kinds of failures to import a candidate into storage. #[derive(Debug, Clone, PartialEq)] pub enum CandidateStorageInsertionError { @@ -142,7 +141,7 @@ impl CandidateStorage { candidate: CommittedCandidateReceipt, persisted_validation_data: PersistedValidationData, state: CandidateState, - ) -> Result { + ) -> Result<(), CandidateStorageInsertionError> { let candidate_hash = candidate.hash(); if self.by_candidate_hash.contains_key(&candidate_hash) { return Err(CandidateStorageInsertionError::CandidateAlreadyKnown(candidate_hash)) @@ -179,28 +178,7 @@ impl CandidateStorage { // sanity-checked already. self.by_candidate_hash.insert(candidate_hash, entry); - Ok(candidate_hash) - } - - /// Remove a candidate from the store. 
- pub fn remove_candidate(&mut self, candidate_hash: &CandidateHash) { - if let Some(entry) = self.by_candidate_hash.remove(candidate_hash) { - if let Entry::Occupied(mut e) = self.by_parent_head.entry(entry.parent_head_data_hash()) - { - e.get_mut().remove(&candidate_hash); - if e.get().is_empty() { - e.remove(); - } - } - - if let Entry::Occupied(mut e) = self.by_output_head.entry(entry.output_head_data_hash()) - { - e.get_mut().remove(&candidate_hash); - if e.get().is_empty() { - e.remove(); - } - } - } + Ok(()) } /// Note that an existing candidate has been backed. @@ -273,7 +251,7 @@ impl CandidateStorage { /// We don't allow forks in a parachain, but we may have multiple candidates with same parent /// across different relay chain forks. That's why it returns an iterator (but only one will be /// valid and used in the end). - fn possible_para_children<'a>( + fn possible_backed_para_children<'a>( &'a self, parent_head_hash: &'a Hash, ) -> impl Iterator + 'a { @@ -282,7 +260,11 @@ impl CandidateStorage { .get(parent_head_hash) .into_iter() .flat_map(|hashes| hashes.iter()) - .filter_map(move |h| by_candidate_hash.get(h)) + .filter_map(move |h| { + by_candidate_hash.get(h).and_then(|candidate| { + (candidate.state == CandidateState::Backed).then_some(candidate) + }) + }) } #[cfg(test)] @@ -464,17 +446,6 @@ impl FragmentNode { } } -/// Response given by `can_add_candidate_as_potential` -#[derive(PartialEq, Debug)] -pub enum PotentialAddition { - /// Can be added as either connected or unconnected candidate. - Anyhow, - /// Can only be added as a connected candidate to the chain. - IfConnected, - /// Cannot be added. - None, -} - /// This is a chain of candidates based on some underlying storage of candidates and a scope. /// /// All nodes in the chain must be either pending availability or within the scope. 
Within the scope @@ -486,10 +457,8 @@ pub(crate) struct FragmentChain { candidates: HashSet, - // Index from head data hash to candidate hashes with that head data as a parent. + // Index from head data hash to the candidate hash with that head data as a parent. by_parent_head: HashMap, - // Index from head data hash to candidate hashes outputting that head data. - by_output_head: HashMap, } impl FragmentChain { @@ -509,7 +478,6 @@ impl FragmentChain { chain: Vec::new(), candidates: HashSet::new(), by_parent_head: HashMap::new(), - by_output_head: HashMap::new(), }; fragment_chain.populate_chain(storage); @@ -537,13 +505,6 @@ impl FragmentChain { self.chain.iter().map(|candidate| candidate.candidate_hash).collect() } - /// Try accumulating more candidates onto the chain. - /// - /// Candidates can only be added if they build on the already existing chain. - pub(crate) fn extend_from_storage(&mut self, storage: &CandidateStorage) { - self.populate_chain(storage); - } - /// Returns the hypothetical state of a candidate with the given hash and parent head data /// in regards to the existing chain. /// @@ -567,85 +528,16 @@ impl FragmentChain { return true } - let can_add_as_potential = self.can_add_candidate_as_potential( + self.can_add_candidate_as_potential( candidate_storage, &candidate.candidate_hash(), &candidate.relay_parent(), candidate.parent_head_data_hash(), candidate.output_head_data_hash(), - ); - - if can_add_as_potential == PotentialAddition::None { - return false - } - - let Some(candidate_relay_parent) = self.scope.ancestor(&candidate.relay_parent()) else { - // can_add_candidate_as_potential already checked for this, but just to be safe. 
- return false - }; - - let identity_modifications = ConstraintModifications::identity(); - let cumulative_modifications = if let Some(last_candidate) = self.chain.last() { - &last_candidate.cumulative_modifications - } else { - &identity_modifications - }; - - let child_constraints = - match self.scope.base_constraints.apply_modifications(&cumulative_modifications) { - Err(e) => { - gum::debug!( - target: LOG_TARGET, - new_parent_head = ?cumulative_modifications.required_parent, - ?candidate_hash, - err = ?e, - "Failed to apply modifications", - ); - - return false - }, - Ok(c) => c, - }; - - let parent_head_hash = candidate.parent_head_data_hash(); - if parent_head_hash == child_constraints.required_parent.hash() { - // We do additional checks for complete candidates. - if let HypotheticalCandidate::Complete { - ref receipt, - ref persisted_validation_data, - .. - } = candidate - { - if Fragment::check_against_constraints( - &candidate_relay_parent, - &child_constraints, - &receipt.commitments, - &receipt.descriptor().validation_code_hash, - persisted_validation_data, - ) - .is_err() - { - gum::debug!( - target: LOG_TARGET, - "Fragment::check_against_constraints() returned error", - ); - return false - } - } - - // If we got this far, it can be added to the chain right now. - true - } else if can_add_as_potential == PotentialAddition::Anyhow { - // Otherwise it is or can be an unconnected candidate, but only if PotentialAddition - // does not force us to only add a connected candidate. - true - } else { - false - } + ) } - /// Select `count` candidates after the given `ancestors` which pass - /// the predicate and have not already been backed on chain. + /// Select `count` candidates after the given `ancestors` which can be backed on chain next. 
/// /// The intention of the `ancestors` is to allow queries on the basis of /// one or more candidates which were previously pending availability becoming @@ -654,7 +546,6 @@ impl FragmentChain { &self, ancestors: Ancestors, count: u32, - pred: impl Fn(&CandidateHash) -> bool, ) -> Vec { if count == 0 { return vec![] @@ -665,9 +556,7 @@ impl FragmentChain { let mut res = Vec::with_capacity(actual_end_index - base_pos); for elem in &self.chain[base_pos..actual_end_index] { - if self.scope.get_pending_availability(&elem.candidate_hash).is_none() && - pred(&elem.candidate_hash) - { + if self.scope.get_pending_availability(&elem.candidate_hash).is_none() { res.push(elem.candidate_hash); } else { break @@ -715,6 +604,18 @@ impl FragmentChain { } } + fn earliest_relay_parent_pending_availability(&self) -> RelayChainBlockInfo { + self.chain + .iter() + .rev() + .find_map(|candidate| { + self.scope + .get_pending_availability(&candidate.candidate_hash) + .map(|c| c.relay_parent.clone()) + }) + .unwrap_or_else(|| self.scope.earliest_relay_parent()) + } + // Checks if this candidate could be added in the future to this chain. // This assumes that the chain does not already contain this candidate. It may or may not be // present in the `CandidateStorage`. @@ -727,14 +628,9 @@ impl FragmentChain { relay_parent: &Hash, parent_head_hash: Hash, output_head_hash: Option, - ) -> PotentialAddition { - // If we've got enough candidates for the configured depth, no point in adding more. 
- if self.chain.len() > self.scope.max_depth { - return PotentialAddition::None - } - + ) -> bool { if !self.check_potential(relay_parent, parent_head_hash, output_head_hash) { - return PotentialAddition::None + return false } let present_in_storage = storage.contains(candidate_hash); @@ -746,14 +642,7 @@ impl FragmentChain { ) .len(); - if (self.chain.len() + unconnected) < self.scope.max_depth { - PotentialAddition::Anyhow - } else if (self.chain.len() + unconnected) == self.scope.max_depth { - // If we've only one slot left to fill, it must be filled with a connected candidate. - PotentialAddition::IfConnected - } else { - PotentialAddition::None - } + (self.chain.len() + unconnected) <= (self.scope.max_depth + EXTRA_UNCONNECTED_COUNT) } // The candidates which are present in `CandidateStorage`, are not part of this chain but could @@ -773,7 +662,9 @@ impl FragmentChain { } } // We stop at max_depth + 1 with the search. There's no point in looping further. - if (self.chain.len() + candidates.len()) > self.scope.max_depth { + if (self.chain.len() + candidates.len()) > + (self.scope.max_depth + EXTRA_UNCONNECTED_COUNT) + { break } if !self.candidates.contains(&candidate.candidate_hash) && @@ -789,33 +680,17 @@ impl FragmentChain { candidates } - // Check if adding a candidate which transitions `parent_head_hash` to `output_head_hash` would - // introduce a fork or a cycle in the parachain. - // `output_head_hash` is optional because we sometimes make this check before retrieving the - // collation. - fn is_fork_or_cycle(&self, parent_head_hash: Hash, output_head_hash: Option) -> bool { - if self.by_parent_head.contains_key(&parent_head_hash) { - // fork. our parent has another child already + fn is_cycle(&self, parent_head_hash: Hash, output_head_hash: Hash) -> bool { + // trivial 0-length cycle. 
+ if parent_head_hash == output_head_hash { return true } - if let Some(output_head_hash) = output_head_hash { - if self.by_output_head.contains_key(&output_head_hash) { - // this is not a chain, there are multiple paths to the same state. - return true - } - - // trivial 0-length cycle. - if parent_head_hash == output_head_hash { - return true - } - - // this should catch any other cycles. our output state cannot already be the parent - // state of another candidate, unless this is a cycle, since the already added - // candidates form a chain. - if self.by_parent_head.contains_key(&output_head_hash) { - return true - } + // this should catch any other cycles. our output state cannot already be the parent + // state of another candidate, unless this is a cycle, since the already added + // candidates form a chain. + if self.by_parent_head.contains_key(&output_head_hash) { + return true } false @@ -832,14 +707,15 @@ impl FragmentChain { parent_head_hash: Hash, output_head_hash: Option, ) -> bool { - if self.is_fork_or_cycle(parent_head_hash, output_head_hash) { - return false + if let Some(output_head_hash) = output_head_hash { + if self.is_cycle(parent_head_hash, output_head_hash) { + return false + } } - let Some(earliest_rp) = self.earliest_relay_parent() else { return false }; - let Some(relay_parent) = self.scope.ancestor(relay_parent) else { return false }; + let earliest_rp = self.earliest_relay_parent_pending_availability(); if relay_parent.number < earliest_rp.number { return false // relay parent moved backwards. } @@ -882,111 +758,150 @@ impl FragmentChain { let required_head_hash = child_constraints.required_parent.hash(); // Even though we don't allow parachain forks under the same active leaf, they may still // appear under different relay chain forks, hence the iterator below. 
- let possible_children = storage.possible_para_children(&required_head_hash); - let mut added_child = false; - for candidate in possible_children { - // Add one node to chain if - // 1. it does not introduce a fork or a cycle. - // 2. parent hash is correct. - // 3. relay-parent does not move backwards. - // 4. all non-pending-availability candidates have relay-parent in scope. - // 5. candidate outputs fulfill constraints - - if self.is_fork_or_cycle( - candidate.parent_head_data_hash(), - Some(candidate.output_head_data_hash()), - ) { - continue - } + let possible_children = storage + .possible_backed_para_children(&required_head_hash) + .filter_map(|candidate| { + // Add one node to chain if + // 1. it does not introduce a fork or a cycle. + // 2. parent hash is correct. + // 3. relay-parent does not move backwards. + // 4. all non-pending-availability candidates have relay-parent in scope. + // 5. candidate outputs fulfill constraints + + if self.is_cycle( + candidate.parent_head_data_hash(), + candidate.output_head_data_hash(), + ) { + return None + } - let pending = self.scope.get_pending_availability(&candidate.candidate_hash); - let Some(relay_parent) = pending - .map(|p| p.relay_parent.clone()) - .or_else(|| self.scope.ancestor(&candidate.relay_parent)) - else { - continue - }; + let pending = self.scope.get_pending_availability(&candidate.candidate_hash); + let Some(relay_parent) = pending + .map(|p| p.relay_parent.clone()) + .or_else(|| self.scope.ancestor(&candidate.relay_parent)) + else { + return None + }; + + // require: candidates don't move backwards + // and only pending availability candidates can be out-of-scope. + // + // earliest_rp can be before the earliest relay parent in the scope + // when the parent is a pending availability candidate as well, but + // only other pending candidates can have a relay parent out of scope. 
+ let min_relay_parent_number = pending + .map(|p| match self.chain.len() { + 0 => p.relay_parent.number, + _ => earliest_rp.number, + }) + .unwrap_or_else(|| earliest_rp.number); + + if relay_parent.number < min_relay_parent_number { + return None // relay parent moved backwards. + } - // require: candidates don't move backwards - // and only pending availability candidates can be out-of-scope. - // - // earliest_rp can be before the earliest relay parent in the scope - // when the parent is a pending availability candidate as well, but - // only other pending candidates can have a relay parent out of scope. - let min_relay_parent_number = pending - .map(|p| match self.chain.len() { - 0 => p.relay_parent.number, - _ => earliest_rp.number, - }) - .unwrap_or_else(|| earliest_rp.number); - - if relay_parent.number < min_relay_parent_number { - continue // relay parent moved backwards. - } + // don't add candidates if they're already present in the chain. + // this can never happen, as candidates can only be duplicated if there's a + // cycle and we shouldn't have allowed for a cycle to be chained. + if self.contains_candidate(&candidate.candidate_hash) { + return None + } - // don't add candidates if they're already present in the chain. - // this can never happen, as candidates can only be duplicated if there's a cycle - // and we shouldn't have allowed for a cycle to be chained. - if self.contains_candidate(&candidate.candidate_hash) { - continue - } + let fragment = { + let mut constraints = child_constraints.clone(); + if let Some(ref p) = pending { + // overwrite for candidates pending availability as a special-case. 
+ constraints.min_relay_parent_number = p.relay_parent.number; + } + + let f = Fragment::new( + relay_parent.clone(), + constraints, + // It's cheap to clone because it's wrapped in an Arc + candidate.candidate.clone(), + ); - let fragment = { - let mut constraints = child_constraints.clone(); - if let Some(ref p) = pending { - // overwrite for candidates pending availability as a special-case. - constraints.min_relay_parent_number = p.relay_parent.number; - } + match f { + Ok(f) => f, + Err(e) => { + gum::debug!( + target: LOG_TARGET, + err = ?e, + ?relay_parent, + candidate_hash = ?candidate.candidate_hash, + "Failed to instantiate fragment", + ); - let f = Fragment::new( - relay_parent.clone(), - constraints, - // It's cheap to clone because it's wrapped in an Arc - candidate.candidate.clone(), - ); + return None + }, + } + }; - match f { - Ok(f) => f, - Err(e) => { - gum::debug!( - target: LOG_TARGET, - err = ?e, - ?relay_parent, - candidate_hash = ?candidate.candidate_hash, - "Failed to instantiate fragment", - ); - - break - }, - } - }; + Some((fragment, candidate.candidate_hash, candidate.parent_head_data_hash)) + }); + + let best_candidate = possible_children.min_by(|child1, child2| child1.1.cmp(&child2.1)); + if let Some((fragment, candidate_hash, parent_head_data_hash)) = best_candidate { // Update the cumulative constraint modifications. cumulative_modifications.stack(fragment.constraint_modifications()); // Update the earliest rp - earliest_rp = relay_parent; + earliest_rp = fragment.relay_parent().clone(); let node = FragmentNode { fragment, - candidate_hash: candidate.candidate_hash, + candidate_hash, cumulative_modifications: cumulative_modifications.clone(), }; self.chain.push(node); - self.candidates.insert(candidate.candidate_hash); + self.candidates.insert(candidate_hash); // We've already checked for forks and cycles. 
- self.by_parent_head - .insert(candidate.parent_head_data_hash(), candidate.candidate_hash); - self.by_output_head - .insert(candidate.output_head_data_hash(), candidate.candidate_hash); - added_child = true; - // We can only add one child for a candidate. (it's a chain, not a tree) - break; + self.by_parent_head.insert(parent_head_data_hash, candidate_hash); + } else { + break } + } + } - if !added_child { - break + pub fn repopulate_chain( + &mut self, + storage: &CandidateStorage, + newly_backed_candidate: &CandidateHash, + ) { + let Some(newly_backed_candidate) = storage.by_candidate_hash.get(newly_backed_candidate) + else { + return + }; + + // Find out if there's already a candidate in the chain with this same parent head data. + // TODO: check that this is not a candidate pending availability. + if let Some(index) = self.chain.iter().position(|fragment_node| { + storage + .by_candidate_hash + .get(&fragment_node.candidate_hash) + .map(|candidate| candidate.parent_head_data_hash) == + Some(newly_backed_candidate.parent_head_data_hash) + }) { + // We need to adhere to the fork selection rule. 
+ if newly_backed_candidate.candidate_hash < self.chain[index].candidate_hash { + // We remove all candidates from the chain starting from its parent + let drained_candidates = self.chain.drain(index.saturating_sub(1)..); + + for drained_candidate in drained_candidates { + self.candidates.remove(&drained_candidate.candidate_hash); + self.by_parent_head.remove( + &drained_candidate + .fragment + .candidate() + .persisted_validation_data + .parent_head + .hash(), + ); + } } } + + self.populate_chain(storage); } } diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index e4b6deffdf4a..f5ab7ead2ab3 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -28,7 +28,7 @@ use std::collections::{HashMap, HashSet}; -use fragment_chain::{FragmentChain, PotentialAddition}; +use fragment_chain::FragmentChain; use futures::{channel::oneshot, prelude::*}; use polkadot_node_subsystem::{ @@ -463,11 +463,12 @@ async fn handle_introduce_seconded_candidate( persisted_validation_data: pvd, } = request; + let candidate_hash = candidate.hash(); let Some(storage) = view.candidate_storage.get_mut(¶) else { gum::warn!( target: LOG_TARGET, para_id = ?para, - candidate_hash = ?candidate.hash(), + ?candidate_hash, "Received seconded candidate for inactive para", ); @@ -478,14 +479,45 @@ async fn handle_introduce_seconded_candidate( let parent_head_hash = pvd.parent_head.hash(); let output_head_hash = Some(candidate.commitments.head_data.hash()); - // We first introduce the candidate in the storage and then try to extend the chain. - // If the candidate gets included in the chain, we can keep it in storage. - // If it doesn't, check that it's still a potential candidate in at least one fragment chain. - // If it's not, we can remove it. 
+ let mut add_to_storage = false; + for (relay_parent, leaf_data) in view.active_leaves.iter_mut() { + if let Some(chain) = leaf_data.fragment_chains.get_mut(¶) { + match chain.can_add_candidate_as_potential( + &storage, + &candidate_hash, + &candidate.descriptor.relay_parent, + parent_head_hash, + output_head_hash, + ) { + true => { + gum::trace!( + target: LOG_TARGET, + para = ?para, + ?relay_parent, + ?candidate_hash, + "Added potential candidate to storage.", + ); + + add_to_storage = true; + }, + false => { + gum::trace!( + target: LOG_TARGET, + para = ?para, + ?relay_parent, + "Not introducing a new candidate: {:?}", + candidate_hash + ); + }, + } + } + } - let candidate_hash = + // If there is at least one leaf where this candidate can be added or potentially added in the + // future, add it to storage. + if add_to_storage { match storage.add_candidate(candidate.clone(), pvd, CandidateState::Seconded) { - Ok(c) => c, + Ok(_) => {}, Err(CandidateStorageInsertionError::CandidateAlreadyKnown(_)) => { gum::debug!( target: LOG_TARGET, @@ -512,66 +544,7 @@ async fn handle_introduce_seconded_candidate( return }, }; - - let mut keep_in_storage = false; - for (relay_parent, leaf_data) in view.active_leaves.iter_mut() { - if let Some(chain) = leaf_data.fragment_chains.get_mut(¶) { - gum::trace!( - target: LOG_TARGET, - para = ?para, - ?relay_parent, - "Candidates in chain before trying to introduce a new one: {:?}", - chain.to_vec() - ); - chain.extend_from_storage(&*storage); - if chain.contains_candidate(&candidate_hash) { - keep_in_storage = true; - - gum::trace!( - target: LOG_TARGET, - ?relay_parent, - para = ?para, - ?candidate_hash, - "Added candidate to chain.", - ); - } else { - match chain.can_add_candidate_as_potential( - &storage, - &candidate_hash, - &candidate.descriptor.relay_parent, - parent_head_hash, - output_head_hash, - ) { - PotentialAddition::Anyhow => { - gum::trace!( - target: LOG_TARGET, - para = ?para, - ?relay_parent, - ?candidate_hash, - 
"Kept candidate as unconnected potential.", - ); - - keep_in_storage = true; - }, - _ => { - gum::trace!( - target: LOG_TARGET, - para = ?para, - ?relay_parent, - "Not introducing a new candidate: {:?}", - candidate_hash - ); - }, - } - } - } - } - - // If there is at least one leaf where this candidate can be added or potentially added in the - // future, keep it in storage. - if !keep_in_storage { - storage.remove_candidate(&candidate_hash); - + } else { gum::debug!( target: LOG_TARGET, para = ?para, @@ -580,7 +553,7 @@ async fn handle_introduce_seconded_candidate( ); } - let _ = tx.send(keep_in_storage); + let _ = tx.send(add_to_storage); } #[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] @@ -624,6 +597,15 @@ async fn handle_candidate_backed( } storage.mark_backed(&candidate_hash); + + // Now try repopulating the fragment chains. + for leaf_data in view.active_leaves.values_mut() { + if let Some(chain) = leaf_data.fragment_chains.get_mut(¶) { + chain.repopulate_chain(storage, &candidate_hash); + + // TODO: log here the added candidates: chain.contains_candidate(candidate) + } + } } fn answer_get_backable_candidates( @@ -687,7 +669,7 @@ fn answer_get_backable_candidates( ); let backable_candidates: Vec<_> = chain - .find_backable_chain(ancestors.clone(), count, |candidate| storage.is_backed(candidate)) + .find_backable_chain(ancestors.clone(), count) .into_iter() .filter_map(|child_hash| { storage.relay_parent_of_candidate(&child_hash).map_or_else( From d6348674c8abb4d3203bf6885cd441d2a7fd85e9 Mon Sep 17 00:00:00 2001 From: alindima Date: Fri, 28 Jun 2024 15:30:04 +0300 Subject: [PATCH 02/56] first prototype part 2 --- .../src/fragment_chain/mod.rs | 248 ++++++++++++++---- .../core/prospective-parachains/src/lib.rs | 39 +-- 2 files changed, 196 insertions(+), 91 deletions(-) diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs index 
22e2ccbbf3c4..60968c8a1554 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs @@ -87,7 +87,10 @@ mod tests; use std::{ - collections::{BTreeMap, HashMap, HashSet}, + collections::{ + hash_map::{Entry, HashMap}, + BTreeMap, HashSet, VecDeque, + }, sync::Arc, }; @@ -142,6 +145,7 @@ impl CandidateStorage { persisted_validation_data: PersistedValidationData, state: CandidateState, ) -> Result<(), CandidateStorageInsertionError> { + // Here, do not allow a cycle. let candidate_hash = candidate.hash(); if self.by_candidate_hash.contains_key(&candidate_hash) { return Err(CandidateStorageInsertionError::CandidateAlreadyKnown(candidate_hash)) @@ -167,6 +171,10 @@ impl CandidateStorage { }), }; + if self.is_cycle(entry.parent_head_data_hash(), entry.output_head_data_hash()) { + return Err(CandidateStorageInsertionError::Cycle) + } + self.by_parent_head .entry(entry.parent_head_data_hash()) .or_default() @@ -181,6 +189,44 @@ impl CandidateStorage { Ok(()) } + // TODO: this also needs to be done in getHypotheticalMembership + fn is_cycle(&self, parent_head_hash: Hash, output_head_hash: Hash) -> bool { + // trivial 0-length cycle. + if parent_head_hash == output_head_hash { + return true + } + + // this should catch any other cycles. our output state cannot already be the parent + // state of another candidate, unless this is a cycle, since the already added + // candidates form a chain. + if self.by_parent_head.contains_key(&output_head_hash) { + return true + } + + false + } + + /// Remove a candidate from the store. 
+ pub fn remove_candidate(&mut self, candidate_hash: &CandidateHash) { + if let Some(entry) = self.by_candidate_hash.remove(candidate_hash) { + if let Entry::Occupied(mut e) = self.by_parent_head.entry(entry.parent_head_data_hash()) + { + e.get_mut().remove(&candidate_hash); + if e.get().is_empty() { + e.remove(); + } + } + + if let Entry::Occupied(mut e) = self.by_output_head.entry(entry.output_head_data_hash()) + { + e.get_mut().remove(&candidate_hash); + if e.get().is_empty() { + e.remove(); + } + } + } + } + /// Note that an existing candidate has been backed. pub fn mark_backed(&mut self, candidate_hash: &CandidateHash) { if let Some(entry) = self.by_candidate_hash.get_mut(candidate_hash) { @@ -459,6 +505,10 @@ pub(crate) struct FragmentChain { // Index from head data hash to the candidate hash with that head data as a parent. by_parent_head: HashMap, + // Index from head data hash to candidate hashes outputting that head data. + by_output_head: HashMap, + + unconnected: HashSet, } impl FragmentChain { @@ -478,10 +528,15 @@ impl FragmentChain { chain: Vec::new(), candidates: HashSet::new(), by_parent_head: HashMap::new(), + by_output_head: HashMap::new(), + unconnected: HashSet::new(), }; fragment_chain.populate_chain(storage); + let unconnected = fragment_chain.find_unconnected_potential_candidates(storage); + fragment_chain.unconnected = unconnected; + fragment_chain } @@ -500,6 +555,11 @@ impl FragmentChain { self.candidates.contains(candidate) } + /// Whether the candidate exists. + pub(crate) fn contains_unconnected_candidate(&self, candidate: &CandidateHash) -> bool { + self.unconnected.contains(candidate) + } + /// Return a vector of the chain's candidate hashes, in-order. 
pub(crate) fn to_vec(&self) -> Vec { self.chain.iter().map(|candidate| candidate.candidate_hash).collect() @@ -521,6 +581,12 @@ impl FragmentChain { candidate: HypotheticalCandidate, candidate_storage: &CandidateStorage, ) -> bool { + if let Some(output_head_hash) = candidate.output_head_data_hash() { + if candidate_storage.is_cycle(candidate.parent_head_data_hash(), output_head_hash) { + return false; + } + } + let candidate_hash = candidate.candidate_hash(); // If we've already used this candidate in the chain @@ -629,20 +695,42 @@ impl FragmentChain { parent_head_hash: Hash, output_head_hash: Option, ) -> bool { - if !self.check_potential(relay_parent, parent_head_hash, output_head_hash) { + if !self.check_potential( + storage, + &candidate_hash, + relay_parent, + parent_head_hash, + output_head_hash, + ) { return false } - let present_in_storage = storage.contains(candidate_hash); - - let unconnected = self - .find_unconnected_potential_candidates( - storage, - present_in_storage.then_some(candidate_hash), - ) - .len(); + // TODO: chain len < max_depth OR unconnected < EXTRA_UNCONNECTED_COUNT. we shouldn't mix + // the two?? 
+ (self.chain.len() + self.unconnected.len()) <= + (self.scope.max_depth + EXTRA_UNCONNECTED_COUNT) + } - (self.chain.len() + unconnected) <= (self.scope.max_depth + EXTRA_UNCONNECTED_COUNT) + pub(crate) fn add_candidate_as_potential( + &self, + storage: &CandidateStorage, + candidate_hash: &CandidateHash, + relay_parent: &Hash, + parent_head_hash: Hash, + output_head_hash: Option, + ) -> bool { + if self.can_add_candidate_as_potential( + storage, + candidate_hash, + relay_parent, + parent_head_hash, + output_head_hash, + ) { + self.unconnected.insert(candidate_hash); + true + } else { + false + } } // The candidates which are present in `CandidateStorage`, are not part of this chain but could @@ -652,15 +740,9 @@ impl FragmentChain { pub(crate) fn find_unconnected_potential_candidates( &self, storage: &CandidateStorage, - ignore_candidate: Option<&CandidateHash>, ) -> Vec { let mut candidates = vec![]; for candidate in storage.candidates() { - if let Some(ignore_candidate) = ignore_candidate { - if ignore_candidate == &candidate.candidate_hash { - continue - } - } // We stop at max_depth + 1 with the search. There's no point in looping further. if (self.chain.len() + candidates.len()) > (self.scope.max_depth + EXTRA_UNCONNECTED_COUNT) @@ -669,6 +751,8 @@ impl FragmentChain { } if !self.candidates.contains(&candidate.candidate_hash) && self.check_potential( + storage, + &candidate.candidate_hash, &candidate.relay_parent, candidate.candidate.persisted_validation_data.parent_head.hash(), Some(candidate.candidate.commitments.head_data.hash()), @@ -680,20 +764,38 @@ impl FragmentChain { candidates } - fn is_cycle(&self, parent_head_hash: Hash, output_head_hash: Hash) -> bool { - // trivial 0-length cycle. 
- if parent_head_hash == output_head_hash { - return true - } + fn can_accept_fork( + &self, + storage: &CandidateStorage, + relay_parent_number: BlockNumber, + candidate_hash: &Hash, + parent_head_hash: Hash, + ) -> bool { + if let Some(other_candidate) = self.by_parent_head.get(&parent_head_hash) { + if self.scope().get_pending_availability(other_candidate).is_some() { + // Cannot accept a fork with a candidate pending availability. + return false + } - // this should catch any other cycles. our output state cannot already be the parent - // state of another candidate, unless this is a cycle, since the already added - // candidates form a chain. - if self.by_parent_head.contains_key(&output_head_hash) { - return true + // If the candidate is backed and in the current chain, accept only a candidate with a + // lower hash. + if other_candidate < candidate_hash { + return false + } + + // Now find the parent to check that the relay parent progresses. + let Some(parent_hash) = self.by_output_head.get(parent_head_hash) else { return false }; + let Some(parent) = storage.by_candidate_hash.get(parent_hash) else { return false }; + let Some(relay_parent_number_of_parent) = self.scope().ancestor(&parent.relay_parent) + else { + return false + }; + + relay_parent_number >= relay_parent_number_of_parent } - false + // Otherwise it's not a fork. + true } // Checks the potential of a candidate to be added to the chain in the future. @@ -703,24 +805,25 @@ impl FragmentChain { // collation. 
fn check_potential( &self, + storage: &CandidateStorage, + candidate_hash: &Hash, relay_parent: &Hash, parent_head_hash: Hash, output_head_hash: Option, ) -> bool { - if let Some(output_head_hash) = output_head_hash { - if self.is_cycle(parent_head_hash, output_head_hash) { - return false - } - } - let Some(relay_parent) = self.scope.ancestor(relay_parent) else { return false }; - let earliest_rp = self.earliest_relay_parent_pending_availability(); if relay_parent.number < earliest_rp.number { return false // relay parent moved backwards. } - true + self.can_accept_fork(storage, relay_parent.number, candidate_hash, parent_head_hash) + + // if is fork with something that is in chain already, only if it has a lower hash and + // satisfies relay parent constraints. + + // if is fork with something that is not in chain, accept. + // otherwise, accept if relay parent is within scope } // Populate the fragment chain with candidates from CandidateStorage. @@ -768,13 +871,6 @@ impl FragmentChain { // 4. all non-pending-availability candidates have relay-parent in scope. // 5. candidate outputs fulfill constraints - if self.is_cycle( - candidate.parent_head_data_hash(), - candidate.output_head_data_hash(), - ) { - return None - } - let pending = self.scope.get_pending_availability(&candidate.candidate_hash); let Some(relay_parent) = pending .map(|p| p.relay_parent.clone()) @@ -837,12 +933,21 @@ impl FragmentChain { } }; - Some((fragment, candidate.candidate_hash, candidate.parent_head_data_hash)) + Some(( + fragment, + candidate.candidate_hash, + candidate.output_head_data_hash, + candidate.parent_head_data_hash, + )) }); let best_candidate = possible_children.min_by(|child1, child2| child1.1.cmp(&child2.1)); - if let Some((fragment, candidate_hash, parent_head_data_hash)) = best_candidate { + // TODO: given that we just chose a best candidate, should we trim more? 
+ + if let Some((fragment, candidate_hash, output_head_data_hash, parent_head_data_hash)) = + best_candidate + { // Update the cumulative constraint modifications. cumulative_modifications.stack(fragment.constraint_modifications()); // Update the earliest rp @@ -858,6 +963,8 @@ impl FragmentChain { self.candidates.insert(candidate_hash); // We've already checked for forks and cycles. self.by_parent_head.insert(parent_head_data_hash, candidate_hash); + self.by_output_head.insert(output_head_data_hash, candidate_hash); + self.unconnected.remove(&candidate_hash); } else { break } @@ -874,6 +981,8 @@ impl FragmentChain { return }; + let mut drained_candidates = None; + // Find out if there's already a candidate in the chain with this same parent head data. // TODO: check that this is not a candidate pending availability. if let Some(index) = self.chain.iter().position(|fragment_node| { @@ -886,22 +995,45 @@ impl FragmentChain { // We need to adhere to the fork selection rule. if newly_backed_candidate.candidate_hash < self.chain[index].candidate_hash { // We remove all candidates from the chain starting from its parent - let drained_candidates = self.chain.drain(index.saturating_sub(1)..); - - for drained_candidate in drained_candidates { - self.candidates.remove(&drained_candidate.candidate_hash); - self.by_parent_head.remove( - &drained_candidate - .fragment - .candidate() - .persisted_validation_data - .parent_head - .hash(), - ); - } + drained_candidates = Some(self.chain.drain(index.saturating_sub(1)..)); + } + } + + if let Some(drained_candidates) = drained_candidates.map(|c| c.iter()) { + for drained_candidate in drained_candidates { + self.candidates.remove(&drained_candidate.candidate_hash); + self.by_parent_head.remove( + &drained_candidate + .fragment + .candidate() + .persisted_validation_data + .parent_head + .hash(), + ); + self.by_output_head + .remove(&drained_candidate.fragment.candidate().commitments.head_data.hash()); } } 
self.populate_chain(storage); + + let Some(drained_candidates) = drained_candidates else { return }; + + let mut queue: VecDeque<_> = drained_candidates.collect(); + + while let Some(candidate) = queue.pop_front() { + let Some(children) = storage + .by_parent_head + .get(&candidate.fragment.candidate().commitments.head_data.hash()) + else { + continue + }; + + for child in children { + if self.unconnected.remove(child) { + queue.push_back(child); + } + } + } } } diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index f5ab7ead2ab3..dcadbe75d809 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -73,7 +73,6 @@ const LOG_TARGET: &str = "parachain::prospective-parachains"; struct RelayBlockViewData { // Scheduling info for paras and upcoming paras. fragment_chains: HashMap, - pending_availability: HashSet, } struct View { @@ -221,8 +220,6 @@ async fn handle_active_leaves_update( let ancestry = fetch_ancestry(&mut *ctx, &mut temp_header_cache, hash, allowed_ancestry_len).await?; - let mut all_pending_availability = HashSet::new(); - // Find constraints. 
let mut fragment_chains = HashMap::new(); for para in scheduled_paras { @@ -243,8 +240,6 @@ async fn handle_active_leaves_update( continue }; - all_pending_availability.extend(pending_availability.iter().map(|c| c.candidate_hash)); - let pending_availability = preprocess_candidates_pending_availability( ctx, &mut temp_header_cache, @@ -311,10 +306,7 @@ async fn handle_active_leaves_update( fragment_chains.insert(para, chain); } - view.active_leaves.insert( - hash, - RelayBlockViewData { fragment_chains, pending_availability: all_pending_availability }, - ); + view.active_leaves.insert(hash, RelayBlockViewData { fragment_chains }); } if !update.deactivated.is_empty() { @@ -332,35 +324,13 @@ fn prune_view_candidate_storage(view: &mut View, metrics: &Metrics) { let mut live_candidates = HashSet::new(); let mut live_paras = HashSet::new(); for sub_view in active_leaves.values() { - live_candidates.extend(sub_view.pending_availability.iter().cloned()); - for (para_id, fragment_chain) in &sub_view.fragment_chains { live_candidates.extend(fragment_chain.to_vec()); + live_candidates.extend(fragment_chain.unconnected()); live_paras.insert(*para_id); } } - let connected_candidates_count = live_candidates.len(); - for (leaf, sub_view) in active_leaves.iter() { - for (para_id, fragment_chain) in &sub_view.fragment_chains { - if let Some(storage) = view.candidate_storage.get(para_id) { - let unconnected_potential = - fragment_chain.find_unconnected_potential_candidates(storage, None); - if !unconnected_potential.is_empty() { - gum::trace!( - target: LOG_TARGET, - ?leaf, - "Keeping {} unconnected candidates for paraid {} in storage: {:?}", - unconnected_potential.len(), - para_id, - unconnected_potential - ); - } - live_candidates.extend(unconnected_potential); - } - } - } - view.candidate_storage.retain(|para_id, storage| { if !live_paras.contains(¶_id) { return false @@ -482,7 +452,7 @@ async fn handle_introduce_seconded_candidate( let mut add_to_storage = false; for 
(relay_parent, leaf_data) in view.active_leaves.iter_mut() { if let Some(chain) = leaf_data.fragment_chains.get_mut(¶) { - match chain.can_add_candidate_as_potential( + match chain.add_candidate_as_potential( &storage, &candidate_hash, &candidate.descriptor.relay_parent, @@ -539,6 +509,7 @@ async fn handle_introduce_seconded_candidate( para = ?para, "Received seconded candidate had mismatching validation data", ); + // TODO: if this fails, we have an invalid candidate in the unconnected storage. let _ = tx.send(false); return @@ -606,6 +577,8 @@ async fn handle_candidate_backed( // TODO: log here the added candidates: chain.contains_candidate(candidate) } } + + prune_view_candidate_storage(view, storage); } fn answer_get_backable_candidates( From ded4ee97ea85854ea416c9cab708329255d03104 Mon Sep 17 00:00:00 2001 From: alindima Date: Mon, 1 Jul 2024 19:07:37 +0300 Subject: [PATCH 03/56] first sane version --- .../src/fragment_chain/mod.rs | 546 ++++++++++-------- .../core/prospective-parachains/src/lib.rs | 353 +++++------ polkadot/node/subsystem-types/src/messages.rs | 40 +- .../src/inclusion_emulator/mod.rs | 62 +- 4 files changed, 516 insertions(+), 485 deletions(-) diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs index 60968c8a1554..04701bdd5492 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs @@ -87,6 +87,7 @@ mod tests; use std::{ + cmp::Ordering, collections::{ hash_map::{Entry, HashMap}, BTreeMap, HashSet, VecDeque, @@ -97,14 +98,15 @@ use std::{ use super::LOG_TARGET; use polkadot_node_subsystem::messages::{Ancestors, HypotheticalCandidate}; use polkadot_node_subsystem_util::inclusion_emulator::{ - ConstraintModifications, Constraints, Fragment, ProspectiveCandidate, RelayChainBlockInfo, + ConstraintModifications, Constraints, Fragment, 
HypotheticalOrConcreteCandidate, + ProspectiveCandidate, RelayChainBlockInfo, }; use polkadot_primitives::{ BlockNumber, CandidateHash, CommittedCandidateReceipt, Hash, HeadData, Id as ParaId, PersistedValidationData, }; -const EXTRA_UNCONNECTED_COUNT: usize = 2; +const EXTRA_UNCONNECTED_COUNT: usize = 10; /// Kinds of failures to import a candidate into storage. #[derive(Debug, Clone, PartialEq)] @@ -114,6 +116,8 @@ pub enum CandidateStorageInsertionError { PersistedValidationDataMismatch, /// The candidate was already known. CandidateAlreadyKnown(CandidateHash), + /// TODO + Cycle, } /// Stores candidates and information about them such as their relay-parents and their backing @@ -132,6 +136,7 @@ pub(crate) struct CandidateStorage { // Even though having multiple candidates with same output would be invalid for a parachain, // it could happen across different relay chain forks. by_output_head: HashMap>, + // TODO: I don't think we can have multiple candidates with the same output head really. // Index from candidate hash to fragment node. 
by_candidate_hash: HashMap, @@ -151,29 +156,12 @@ impl CandidateStorage { return Err(CandidateStorageInsertionError::CandidateAlreadyKnown(candidate_hash)) } - if persisted_validation_data.hash() != candidate.descriptor.persisted_validation_data_hash { - return Err(CandidateStorageInsertionError::PersistedValidationDataMismatch) - } + let entry = + CandidateEntry::new(candidate_hash, candidate, persisted_validation_data, state)?; - let entry = CandidateEntry { - candidate_hash, - parent_head_data_hash: persisted_validation_data.parent_head.hash(), - output_head_data_hash: candidate.commitments.head_data.hash(), - relay_parent: candidate.descriptor.relay_parent, - state, - candidate: Arc::new(ProspectiveCandidate { - commitments: candidate.commitments, - collator: candidate.descriptor.collator, - collator_signature: candidate.descriptor.signature, - persisted_validation_data, - pov_hash: candidate.descriptor.pov_hash, - validation_code_hash: candidate.descriptor.validation_code_hash, - }), - }; - - if self.is_cycle(entry.parent_head_data_hash(), entry.output_head_data_hash()) { - return Err(CandidateStorageInsertionError::Cycle) - } + // if self.is_cycle(entry.parent_head_data_hash(), entry.output_head_data_hash()) { + // return Err(CandidateStorageInsertionError::Cycle) + // } self.by_parent_head .entry(entry.parent_head_data_hash()) @@ -189,21 +177,31 @@ impl CandidateStorage { Ok(()) } - // TODO: this also needs to be done in getHypotheticalMembership - fn is_cycle(&self, parent_head_hash: Hash, output_head_hash: Hash) -> bool { - // trivial 0-length cycle. - if parent_head_hash == output_head_hash { - return true + fn add_candidate_entry( + &mut self, + candidate: CandidateEntry, + ) -> Result<(), CandidateStorageInsertionError> { + let candidate_hash = candidate.candidate_hash; + if self.by_candidate_hash.contains_key(&candidate_hash) { + return Err(CandidateStorageInsertionError::CandidateAlreadyKnown(candidate_hash)) } - // this should catch any other cycles. 
our output state cannot already be the parent - // state of another candidate, unless this is a cycle, since the already added - // candidates form a chain. - if self.by_parent_head.contains_key(&output_head_hash) { - return true - } + // if self.is_cycle(candidate.parent_head_data_hash(), candidate.output_head_data_hash()) { + // return Err(CandidateStorageInsertionError::Cycle) + // } - false + self.by_parent_head + .entry(candidate.parent_head_data_hash()) + .or_default() + .insert(candidate_hash); + self.by_output_head + .entry(candidate.output_head_data_hash()) + .or_default() + .insert(candidate_hash); + // sanity-checked already. + self.by_candidate_hash.insert(candidate_hash, candidate); + + Ok(()) } /// Remove a candidate from the store. @@ -228,22 +226,17 @@ impl CandidateStorage { } /// Note that an existing candidate has been backed. - pub fn mark_backed(&mut self, candidate_hash: &CandidateHash) { + fn mark_backed(&mut self, candidate_hash: &CandidateHash) -> bool { if let Some(entry) = self.by_candidate_hash.get_mut(candidate_hash) { gum::trace!(target: LOG_TARGET, ?candidate_hash, "Candidate marked as backed"); entry.state = CandidateState::Backed; + true } else { gum::trace!(target: LOG_TARGET, ?candidate_hash, "Candidate not found while marking as backed"); + false } } - /// Whether a candidate is recorded as being backed. - pub fn is_backed(&self, candidate_hash: &CandidateHash) -> bool { - self.by_candidate_hash - .get(candidate_hash) - .map_or(false, |e| e.state == CandidateState::Backed) - } - /// Whether a candidate is contained within the storage already. pub fn contains(&self, candidate_hash: &CandidateHash) -> bool { self.by_candidate_hash.contains_key(candidate_hash) @@ -254,19 +247,6 @@ impl CandidateStorage { self.by_candidate_hash.values() } - /// Retain only candidates which pass the predicate. 
- pub(crate) fn retain(&mut self, pred: impl Fn(&CandidateHash) -> bool) { - self.by_candidate_hash.retain(|h, _v| pred(h)); - self.by_parent_head.retain(|_parent, children| { - children.retain(|h| pred(h)); - !children.is_empty() - }); - self.by_output_head.retain(|_output, candidates| { - candidates.retain(|h| pred(h)); - !candidates.is_empty() - }); - } - /// Get head-data by hash. pub(crate) fn head_data_by_hash(&self, hash: &Hash) -> Option<&HeadData> { // First, search for candidates outputting this head data and extract the head data @@ -288,11 +268,6 @@ impl CandidateStorage { }) } - /// Returns candidate's relay parent, if present. - pub(crate) fn relay_parent_of_candidate(&self, candidate_hash: &CandidateHash) -> Option { - self.by_candidate_hash.get(candidate_hash).map(|entry| entry.relay_parent) - } - /// Returns the candidates which have the given head data hash as parent. /// We don't allow forks in a parachain, but we may have multiple candidates with same parent /// across different relay chain forks. 
That's why it returns an iterator (but only one will be @@ -313,9 +288,8 @@ impl CandidateStorage { }) } - #[cfg(test)] - pub fn len(&self) -> (usize, usize) { - (self.by_parent_head.len(), self.by_candidate_hash.len()) + pub fn len(&self) -> usize { + self.by_candidate_hash.len() } } @@ -341,8 +315,31 @@ pub(crate) struct CandidateEntry { } impl CandidateEntry { - pub fn hash(&self) -> CandidateHash { - self.candidate_hash + pub fn new( + candidate_hash: CandidateHash, + candidate: CommittedCandidateReceipt, + persisted_validation_data: PersistedValidationData, + state: CandidateState, + ) -> Result { + if persisted_validation_data.hash() != candidate.descriptor.persisted_validation_data_hash { + return Err(CandidateStorageInsertionError::PersistedValidationDataMismatch) + } + + Ok(Self { + candidate_hash, + parent_head_data_hash: persisted_validation_data.parent_head.hash(), + output_head_data_hash: candidate.commitments.head_data.hash(), + relay_parent: candidate.descriptor.relay_parent, + state, + candidate: Arc::new(ProspectiveCandidate { + commitments: candidate.commitments, + collator: candidate.descriptor.collator, + collator_signature: candidate.descriptor.signature, + persisted_validation_data, + pov_hash: candidate.descriptor.pov_hash, + validation_code_hash: candidate.descriptor.validation_code_hash, + }), + }) } pub fn parent_head_data_hash(&self) -> Hash { @@ -484,6 +481,8 @@ pub struct FragmentNode { fragment: Fragment, candidate_hash: CandidateHash, cumulative_modifications: ConstraintModifications, + parent_head_data_hash: Hash, + output_head_data_hash: Hash, } impl FragmentNode { @@ -508,12 +507,12 @@ pub(crate) struct FragmentChain { // Index from head data hash to candidate hashes outputting that head data. by_output_head: HashMap, - unconnected: HashSet, + unconnected: CandidateStorage, } impl FragmentChain { /// Create a new [`FragmentChain`] with given scope and populated from the storage. 
- pub fn populate(scope: Scope, storage: &CandidateStorage) -> Self { + pub fn populate(scope: Scope, parent_candidates: &mut CandidateStorage) -> Self { gum::trace!( target: LOG_TARGET, relay_parent = ?scope.relay_parent.hash, @@ -529,13 +528,15 @@ impl FragmentChain { candidates: HashSet::new(), by_parent_head: HashMap::new(), by_output_head: HashMap::new(), - unconnected: HashSet::new(), + unconnected: CandidateStorage::default(), }; - fragment_chain.populate_chain(storage); + fragment_chain.populate_chain(parent_candidates); - let unconnected = fragment_chain.find_unconnected_potential_candidates(storage); - fragment_chain.unconnected = unconnected; + // Trim the forks that we know can no longer make it on-chain. + fragment_chain.trim_uneligible_forks(parent_candidates); + + fragment_chain.populate_unconnected_potential_candidates(parent_candidates); fragment_chain } @@ -555,16 +556,28 @@ impl FragmentChain { self.candidates.contains(candidate) } - /// Whether the candidate exists. - pub(crate) fn contains_unconnected_candidate(&self, candidate: &CandidateHash) -> bool { - self.unconnected.contains(candidate) - } - /// Return a vector of the chain's candidate hashes, in-order. pub(crate) fn to_vec(&self) -> Vec { self.chain.iter().map(|candidate| candidate.candidate_hash).collect() } + pub fn as_candidate_storage(&self) -> CandidateStorage { + let mut storage = self.unconnected.clone(); + + for candidate in self.chain.iter() { + let _ = storage.add_candidate_entry(CandidateEntry { + candidate_hash: candidate.candidate_hash, + parent_head_data_hash: candidate.parent_head_data_hash, + output_head_data_hash: candidate.output_head_data_hash, + relay_parent: candidate.relay_parent(), + candidate: candidate.fragment.candidate_clone(), // This clone is very cheap. + state: CandidateState::Backed, + }); // TODO: is it really ok to hide the error here? 
+ } + + storage + } + /// Returns the hypothetical state of a candidate with the given hash and parent head data /// in regards to the existing chain. /// @@ -576,31 +589,72 @@ impl FragmentChain { /// /// If this returns false, the candidate could never be added to the current chain (not now, not /// ever) - pub(crate) fn hypothetical_membership( - &self, - candidate: HypotheticalCandidate, - candidate_storage: &CandidateStorage, - ) -> bool { + pub(crate) fn hypothetical_membership(&self, candidate: HypotheticalCandidate) -> bool { if let Some(output_head_hash) = candidate.output_head_data_hash() { - if candidate_storage.is_cycle(candidate.parent_head_data_hash(), output_head_hash) { + if self.is_cycle(candidate.parent_head_data_hash(), output_head_hash) { return false; } } let candidate_hash = candidate.candidate_hash(); - // If we've already used this candidate in the chain - if self.candidates.contains(&candidate_hash) { + // If we've already used this candidate in the chain or in the unconnected storage. + if self.candidates.contains(&candidate_hash) || self.unconnected.contains(&candidate_hash) { + return true + } + + self.can_add_candidate_as_potential(&candidate_hash, &candidate) + } + + // TODO: this needs modifying. + fn is_cycle(&self, parent_head_hash: Hash, output_head_hash: Hash) -> bool { + // trivial 0-length cycle. + if parent_head_hash == output_head_hash { + return true + } + + // this should catch any other cycles. our output state cannot already be the parent + // state of another candidate, unless this is a cycle, since the already added + // candidates form a chain. 
+ if self.by_parent_head.contains_key(&output_head_hash) { return true } - self.can_add_candidate_as_potential( - candidate_storage, - &candidate.candidate_hash(), - &candidate.relay_parent(), - candidate.parent_head_data_hash(), - candidate.output_head_data_hash(), - ) + false + } + + pub(crate) fn get_head_data_by_hash(&self, head_data_hash: &Hash) -> Option { + let required_parent = &self.scope.base_constraints().required_parent; + if &required_parent.hash() == head_data_hash { + return Some(required_parent.clone()) + } + + let has_head_data_in_chain = self + .by_parent_head + .get(head_data_hash) + .or_else(|| self.by_output_head.get(head_data_hash)) + .is_some(); + + if has_head_data_in_chain { + return self.chain.iter().find_map(|candidate| { + if &candidate.parent_head_data_hash == head_data_hash { + Some( + candidate + .fragment + .candidate() + .persisted_validation_data + .parent_head + .clone(), + ) + } else if &candidate.output_head_data_hash == head_data_hash { + Some(candidate.fragment.candidate().commitments.head_data.clone()) + } else { + None + } + }); + } + + self.unconnected.head_data_by_hash(head_data_hash).cloned() } /// Select `count` candidates after the given `ancestors` which can be backed on chain next. @@ -612,7 +666,7 @@ impl FragmentChain { &self, ancestors: Ancestors, count: u32, - ) -> Vec { + ) -> Vec<(CandidateHash, Hash)> { if count == 0 { return vec![] } @@ -623,7 +677,7 @@ impl FragmentChain { for elem in &self.chain[base_pos..actual_end_index] { if self.scope.get_pending_availability(&elem.candidate_hash).is_none() { - res.push(elem.candidate_hash); + res.push((elem.candidate_hash, elem.relay_parent())); } else { break } @@ -687,49 +741,39 @@ impl FragmentChain { // present in the `CandidateStorage`. // Even if the candidate is a potential candidate, this function will indicate that it can be // kept only if there's enough room for it. 
- pub(crate) fn can_add_candidate_as_potential( + fn can_add_candidate_as_potential( &self, - storage: &CandidateStorage, candidate_hash: &CandidateHash, - relay_parent: &Hash, - parent_head_hash: Hash, - output_head_hash: Option, + candidate: &impl HypotheticalOrConcreteCandidate, ) -> bool { - if !self.check_potential( - storage, - &candidate_hash, - relay_parent, - parent_head_hash, - output_head_hash, - ) { + if !self.check_potential(candidate_hash, candidate) { return false } // TODO: chain len < max_depth OR unconnected < EXTRA_UNCONNECTED_COUNT. we shouldn't mix - // the two?? - (self.chain.len() + self.unconnected.len()) <= + // the two?? NO. here we should have the max number of candidates in the tree! + (self.chain.len() + self.unconnected.by_candidate_hash.len()) <= (self.scope.max_depth + EXTRA_UNCONNECTED_COUNT) } - pub(crate) fn add_candidate_as_potential( - &self, - storage: &CandidateStorage, - candidate_hash: &CandidateHash, - relay_parent: &Hash, - parent_head_hash: Hash, - output_head_hash: Option, - ) -> bool { - if self.can_add_candidate_as_potential( - storage, - candidate_hash, - relay_parent, - parent_head_hash, - output_head_hash, - ) { - self.unconnected.insert(candidate_hash); - true + pub(crate) fn try_adding_seconded_candidate( + &mut self, + candidate: &CandidateEntry, + ) -> Result { + if self.candidates.contains(&candidate.candidate_hash) || + self.unconnected.contains(&candidate.candidate_hash) + { + return Err(CandidateStorageInsertionError::CandidateAlreadyKnown( + candidate.candidate_hash, + )) + } + + if self.can_add_candidate_as_potential(&candidate.candidate_hash, &*candidate.candidate) { + // This clone is cheap, as it uses an Arc for the expensive stuff. + self.unconnected.add_candidate_entry(candidate.clone())?; + Ok(true) } else { - false + Ok(false) } } @@ -737,100 +781,140 @@ impl FragmentChain { // become part of this chain in the future. Capped at the max depth minus the existing chain // length. 
// If `ignore_candidate` is supplied and found in storage, it won't be counted. - pub(crate) fn find_unconnected_potential_candidates( - &self, - storage: &CandidateStorage, - ) -> Vec { - let mut candidates = vec![]; - for candidate in storage.candidates() { + fn populate_unconnected_potential_candidates(&mut self, old_storage: &CandidateStorage) { + for candidate in old_storage.candidates() { + // Sanity check, all pending availability candidates should be already present in the + // chain. + if self.scope.get_pending_availability(&candidate.candidate_hash).is_some() { + continue + } // We stop at max_depth + 1 with the search. There's no point in looping further. - if (self.chain.len() + candidates.len()) > + if (self.chain.len() + self.unconnected.len()) > (self.scope.max_depth + EXTRA_UNCONNECTED_COUNT) { break } if !self.candidates.contains(&candidate.candidate_hash) && - self.check_potential( - storage, - &candidate.candidate_hash, - &candidate.relay_parent, - candidate.candidate.persisted_validation_data.parent_head.hash(), - Some(candidate.candidate.commitments.head_data.hash()), - ) { - candidates.push(candidate.candidate_hash); + self.check_potential(&candidate.candidate_hash, &*candidate.candidate) + { + // This is cheap because the expensive stuff is wrapped in an Arc + let _ = self.unconnected.add_candidate_entry(candidate.clone()); // TODO: is it ok to hide + // result here? } } - - candidates } - fn can_accept_fork( + // Checks the potential of a candidate to be added to the chain in the future. + // Verifies that the relay parent is in scope and not moving backwards and that we're not + // introducing forks or cycles with other candidates in the chain. + // `output_head_hash` is optional because we sometimes make this check before retrieving the + // collation. 
+ fn check_potential( &self, - storage: &CandidateStorage, - relay_parent_number: BlockNumber, - candidate_hash: &Hash, - parent_head_hash: Hash, + candidate_hash: &CandidateHash, + candidate: &impl HypotheticalOrConcreteCandidate, ) -> bool { - if let Some(other_candidate) = self.by_parent_head.get(&parent_head_hash) { - if self.scope().get_pending_availability(other_candidate).is_some() { - // Cannot accept a fork with a candidate pending availability. - return false - } + let relay_parent = candidate.relay_parent(); + let parent_head_hash = candidate.parent_head_data_hash(); - // If the candidate is backed and in the current chain, accept only a candidate with a - // lower hash. - if other_candidate < candidate_hash { - return false + let Some(relay_parent) = self.scope.ancestor(&relay_parent) else { return false }; + let earliest_rp = self.earliest_relay_parent_pending_availability(); + if relay_parent.number < earliest_rp.number { + return false // relay parent moved backwards. + } + + let constraints = if let Some(parent_candidate) = self.by_output_head.get(&parent_head_hash) + { + // Check if it's a fork. + if let Some(other_candidate) = self.by_parent_head.get(&parent_head_hash) { + if self.scope().get_pending_availability(other_candidate).is_some() { + // Cannot accept a fork with a candidate pending availability. + return false + } + + // If the candidate is backed and in the current chain, accept only a candidate with + // a lower hash. + if other_candidate < candidate_hash { + return false + } } - // Now find the parent to check that the relay parent progresses. 
- let Some(parent_hash) = self.by_output_head.get(parent_head_hash) else { return false }; - let Some(parent) = storage.by_candidate_hash.get(parent_hash) else { return false }; - let Some(relay_parent_number_of_parent) = self.scope().ancestor(&parent.relay_parent) + let Some(parent_candidate) = + self.chain.iter().find(|c| &c.candidate_hash == parent_candidate) + else { + return false + }; + let Ok(constraints) = self + .scope + .base_constraints + .apply_modifications(&parent_candidate.cumulative_modifications) else { return false }; + constraints + // Check if it builds on the latest included candidate + } else if self.scope.base_constraints.required_parent.hash() == parent_head_hash { + self.scope.base_constraints.clone() + } else { + return true + }; - relay_parent_number >= relay_parent_number_of_parent + // We do additional checks for complete candidates. + if let (Some(commitments), Some(pvd), Some(validation_code_hash)) = ( + candidate.commitments(), + candidate.persisted_validation_data(), + candidate.validation_code_hash(), + ) { + if Fragment::check_against_constraints( + &relay_parent, + &constraints, + commitments, + validation_code_hash, + pvd, + ) + .is_err() + { + gum::debug!( + target: LOG_TARGET, + "Fragment::check_against_constraints() returned error", + ); + return false + } + } else { + // Otherwise, at least check the relay parent progresses. + return relay_parent.number >= constraints.min_relay_parent_number } - // Otherwise it's not a fork. true } - // Checks the potential of a candidate to be added to the chain in the future. - // Verifies that the relay parent is in scope and not moving backwards and that we're not - // introducing forks or cycles with other candidates in the chain. - // `output_head_hash` is optional because we sometimes make this check before retrieving the - // collation. 
- fn check_potential( - &self, - storage: &CandidateStorage, - candidate_hash: &Hash, - relay_parent: &Hash, - parent_head_hash: Hash, - output_head_hash: Option, - ) -> bool { - let Some(relay_parent) = self.scope.ancestor(relay_parent) else { return false }; - let earliest_rp = self.earliest_relay_parent_pending_availability(); - if relay_parent.number < earliest_rp.number { - return false // relay parent moved backwards. - } + fn trim_uneligible_forks(&self, storage: &mut CandidateStorage) { + let mut queue: VecDeque<_> = self.chain.iter().map(|c| c.parent_head_data_hash).collect(); - self.can_accept_fork(storage, relay_parent.number, candidate_hash, parent_head_hash) + // TODO: if there's a cycle or multiple paths to the same node, this will loop forever. + while let Some(parent) = queue.pop_front() { + let Some(children) = storage.by_parent_head.get(&parent) else { continue }; + let mut to_remove = vec![]; - // if is fork with something that is in chain already, only if it has a lower hash and - // satisfies relay parent constraints. + for child_hash in children.iter() { + let Some(child) = storage.by_candidate_hash.get(child_hash) else { continue }; + if !self.check_potential(child_hash, &*child.candidate) { + to_remove.push(*child_hash); + queue.push_back(child.output_head_data_hash); + } + } - // if is fork with something that is not in chain, accept. - // otherwise, accept if relay parent is within scope + for hash in to_remove { + storage.remove_candidate(&hash); + } + } } // Populate the fragment chain with candidates from CandidateStorage. // Can be called by the constructor or when introducing a new candidate. // If we're introducing a new candidate onto an existing chain, we may introduce more than one, // since we may connect already existing candidates to the chain. 
- fn populate_chain(&mut self, storage: &CandidateStorage) { + fn populate_chain(&mut self, storage: &mut CandidateStorage) { let mut cumulative_modifications = if let Some(last_candidate) = self.chain.last() { last_candidate.cumulative_modifications.clone() } else { @@ -861,6 +945,7 @@ impl FragmentChain { let required_head_hash = child_constraints.required_parent.hash(); // Even though we don't allow parachain forks under the same active leaf, they may still // appear under different relay chain forks, hence the iterator below. + let possible_children = storage .possible_backed_para_children(&required_head_hash) .filter_map(|candidate| { @@ -941,13 +1026,22 @@ impl FragmentChain { )) }); - let best_candidate = possible_children.min_by(|child1, child2| child1.1.cmp(&child2.1)); - - // TODO: given that we just chose a best candidate, should we trim more? + let best_candidate = possible_children.min_by(|child1, child2| { + // Always pick a candidate pending availability as best. + if self.scope.get_pending_availability(&child1.1).is_some() { + Ordering::Less + } else if self.scope.get_pending_availability(&child2.1).is_some() { + Ordering::Greater + } else { + child1.1.cmp(&child2.1) + } + }); if let Some((fragment, candidate_hash, output_head_data_hash, parent_head_data_hash)) = best_candidate { + storage.remove_candidate(&candidate_hash); + // Update the cumulative constraint modifications. cumulative_modifications.stack(fragment.constraint_modifications()); // Update the earliest rp @@ -956,6 +1050,8 @@ impl FragmentChain { let node = FragmentNode { fragment, candidate_hash, + parent_head_data_hash, + output_head_data_hash, cumulative_modifications: cumulative_modifications.clone(), }; @@ -964,76 +1060,20 @@ impl FragmentChain { // We've already checked for forks and cycles. 
self.by_parent_head.insert(parent_head_data_hash, candidate_hash); self.by_output_head.insert(output_head_data_hash, candidate_hash); - self.unconnected.remove(&candidate_hash); } else { break } } } - pub fn repopulate_chain( - &mut self, - storage: &CandidateStorage, - newly_backed_candidate: &CandidateHash, - ) { - let Some(newly_backed_candidate) = storage.by_candidate_hash.get(newly_backed_candidate) - else { - return - }; - - let mut drained_candidates = None; - - // Find out if there's already a candidate in the chain with this same parent head data. - // TODO: check that this is not a candidate pending availability. - if let Some(index) = self.chain.iter().position(|fragment_node| { - storage - .by_candidate_hash - .get(&fragment_node.candidate_hash) - .map(|candidate| candidate.parent_head_data_hash) == - Some(newly_backed_candidate.parent_head_data_hash) - }) { - // We need to adhere to the fork selection rule. - if newly_backed_candidate.candidate_hash < self.chain[index].candidate_hash { - // We remove all candidates from the chain starting from its parent - drained_candidates = Some(self.chain.drain(index.saturating_sub(1)..)); - } - } - - if let Some(drained_candidates) = drained_candidates.map(|c| c.iter()) { - for drained_candidate in drained_candidates { - self.candidates.remove(&drained_candidate.candidate_hash); - self.by_parent_head.remove( - &drained_candidate - .fragment - .candidate() - .persisted_validation_data - .parent_head - .hash(), - ); - self.by_output_head - .remove(&drained_candidate.fragment.candidate().commitments.head_data.hash()); - } + pub fn candidate_backed(mut self, newly_backed_candidate: &CandidateHash) -> Self { + if !self.unconnected.mark_backed(newly_backed_candidate) { + return self } - self.populate_chain(storage); - - let Some(drained_candidates) = drained_candidates else { return }; - - let mut queue: VecDeque<_> = drained_candidates.collect(); - - while let Some(candidate) = queue.pop_front() { - let Some(children) 
= storage - .by_parent_head - .get(&candidate.fragment.candidate().commitments.head_data.hash()) - else { - continue - }; + // TODO: if is already in chain, it's an error. - for child in children { - if self.unconnected.remove(child) { - queue.push_back(child); - } - } - } + let mut old_storage = self.as_candidate_storage(); + Self::populate(self.scope, &mut old_storage) } } diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index dcadbe75d809..92d55e91fa20 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -28,7 +28,7 @@ use std::collections::{HashMap, HashSet}; -use fragment_chain::FragmentChain; +use fragment_chain::{CandidateEntry, FragmentChain}; use futures::{channel::oneshot, prelude::*}; use polkadot_node_subsystem::{ @@ -54,10 +54,7 @@ use polkadot_primitives::{ use crate::{ error::{FatalError, FatalResult, JfyiError, JfyiErrorResult, Result}, - fragment_chain::{ - CandidateState, CandidateStorage, CandidateStorageInsertionError, - Scope as FragmentChainScope, - }, + fragment_chain::{CandidateState, CandidateStorageInsertionError, Scope as FragmentChainScope}, }; mod error; @@ -78,12 +75,11 @@ struct RelayBlockViewData { struct View { // Active or recent relay-chain blocks by block hash. active_leaves: HashMap, - candidate_storage: HashMap, } impl View { fn new() -> Self { - View { active_leaves: HashMap::new(), candidate_storage: HashMap::new() } + View { active_leaves: HashMap::new() } } } @@ -174,10 +170,6 @@ async fn handle_active_leaves_update( // 3. construct new fragment chain for each para for each new leaf // 4. prune candidate storage. 
- for deactivated in &update.deactivated { - view.active_leaves.remove(deactivated); - } - let mut temp_header_cache = HashMap::new(); for activated in update.activated.into_iter() { let hash = activated.hash; @@ -217,14 +209,35 @@ async fn handle_active_leaves_update( Some(info) => info, }; - let ancestry = - fetch_ancestry(&mut *ctx, &mut temp_header_cache, hash, allowed_ancestry_len).await?; + let requested_ancestry_len = if allowed_ancestry_len == 0 { + 1 + // We should try requesting at least one, so that we can know the previous leaf. + } else { + allowed_ancestry_len + }; + let mut ancestry = + fetch_ancestry(&mut *ctx, &mut temp_header_cache, hash, requested_ancestry_len).await?; + + let prev_fragment_chains = + ancestry.first().and_then(|prev_leaf| view.active_leaves.get(&prev_leaf.hash)); + + if allowed_ancestry_len == 0 { + // Now, if the allowed ancestry len was 0, clear the one ancestor we requested. + ancestry.clear(); + } // Find constraints. let mut fragment_chains = HashMap::new(); for para in scheduled_paras { - let candidate_storage = - view.candidate_storage.entry(para).or_insert_with(CandidateStorage::default); + let mut prev_candidate_storage = prev_fragment_chains + .map(|chains| { + chains + .fragment_chains + .get(¶) + .map(|chain| chain.as_candidate_storage()) + .unwrap_or_default() + }) + .unwrap_or_default(); let backing_state = fetch_backing_state(&mut *ctx, hash, para).await?; @@ -250,12 +263,12 @@ async fn handle_active_leaves_update( let mut compact_pending = Vec::with_capacity(pending_availability.len()); for c in pending_availability { - let res = candidate_storage.add_candidate( + let candidate_hash = c.compact.candidate_hash; + let res = prev_candidate_storage.add_candidate( c.candidate, c.persisted_validation_data, CandidateState::Backed, ); - let candidate_hash = c.compact.candidate_hash; match res { Ok(_) | Err(CandidateStorageInsertionError::CandidateAlreadyKnown(_)) => {}, @@ -284,6 +297,7 @@ async fn 
handle_active_leaves_update( ancestry.iter().cloned(), ) .expect("ancestors are provided in reverse order and correctly; qed"); + // TODO: let's not panic here. gum::trace!( target: LOG_TARGET, @@ -293,7 +307,7 @@ async fn handle_active_leaves_update( "Creating fragment chain" ); - let chain = FragmentChain::populate(scope, &*candidate_storage); + let chain = FragmentChain::populate(scope, &mut prev_candidate_storage); gum::trace!( target: LOG_TARGET, @@ -309,56 +323,13 @@ async fn handle_active_leaves_update( view.active_leaves.insert(hash, RelayBlockViewData { fragment_chains }); } - if !update.deactivated.is_empty() { - // This has potential to be a hotspot. - prune_view_candidate_storage(view, metrics); + for deactivated in &update.deactivated { + view.active_leaves.remove(deactivated); } Ok(()) } -fn prune_view_candidate_storage(view: &mut View, metrics: &Metrics) { - let _timer = metrics.time_prune_view_candidate_storage(); - - let active_leaves = &view.active_leaves; - let mut live_candidates = HashSet::new(); - let mut live_paras = HashSet::new(); - for sub_view in active_leaves.values() { - for (para_id, fragment_chain) in &sub_view.fragment_chains { - live_candidates.extend(fragment_chain.to_vec()); - live_candidates.extend(fragment_chain.unconnected()); - live_paras.insert(*para_id); - } - } - - view.candidate_storage.retain(|para_id, storage| { - if !live_paras.contains(¶_id) { - return false - } - - storage.retain(|h| live_candidates.contains(&h)); - - // Even if `storage` is now empty, we retain. - // This maintains a convenient invariant that para-id storage exists - // as long as there's an active head which schedules the para. 
- true - }); - - for (para_id, storage) in view.candidate_storage.iter() { - gum::trace!( - target: LOG_TARGET, - "Keeping a total of {} connected candidates for paraid {} in storage", - storage.candidates().count(), - para_id, - ); - } - - metrics.record_candidate_storage_size( - connected_candidates_count as u64, - live_candidates.len().saturating_sub(connected_candidates_count) as u64, - ); -} - struct ImportablePendingAvailability { candidate: CommittedCandidateReceipt, persisted_validation_data: PersistedValidationData, @@ -433,89 +404,63 @@ async fn handle_introduce_seconded_candidate( persisted_validation_data: pvd, } = request; + // let Some(storage) = view.candidate_storage.get_mut(¶) else { + // gum::warn!( + // target: LOG_TARGET, + // para_id = ?para, + // ?candidate_hash, + // "Received seconded candidate for inactive para", + // ); + + // let _ = tx.send(false); + // return + // }; // TODO: add this log somehow + let candidate_hash = candidate.hash(); - let Some(storage) = view.candidate_storage.get_mut(¶) else { + let Ok(candidate_entry) = + CandidateEntry::new(candidate_hash, candidate, pvd, CandidateState::Seconded) + else { + // TODO: what if we add more error variants here?. replace this with a match. 
gum::warn!( target: LOG_TARGET, - para_id = ?para, - ?candidate_hash, - "Received seconded candidate for inactive para", + para = ?para, + "Received seconded candidate had mismatching validation data", ); let _ = tx.send(false); return }; - let parent_head_hash = pvd.parent_head.hash(); - let output_head_hash = Some(candidate.commitments.head_data.hash()); - - let mut add_to_storage = false; - for (relay_parent, leaf_data) in view.active_leaves.iter_mut() { + let mut added = false; + for leaf_data in view.active_leaves.values_mut() { if let Some(chain) = leaf_data.fragment_chains.get_mut(¶) { - match chain.add_candidate_as_potential( - &storage, - &candidate_hash, - &candidate.descriptor.relay_parent, - parent_head_hash, - output_head_hash, - ) { - true => { - gum::trace!( - target: LOG_TARGET, - para = ?para, - ?relay_parent, - ?candidate_hash, - "Added potential candidate to storage.", - ); - - add_to_storage = true; + match chain.try_adding_seconded_candidate(&candidate_entry) { + Ok(true) => { + added = true; }, - false => { - gum::trace!( + Err(CandidateStorageInsertionError::CandidateAlreadyKnown(_)) => { + gum::debug!( target: LOG_TARGET, para = ?para, - ?relay_parent, - "Not introducing a new candidate: {:?}", + "Attempting to introduce an already known candidate: {:?}", candidate_hash ); + added = true; + }, + Err(CandidateStorageInsertionError::PersistedValidationDataMismatch) => { + // We already checked for this. + let _ = tx.send(false); + return + }, + // TODO: log + Ok(false) => {}, + Err(CandidateStorageInsertionError::Cycle) => { // TODO: log }, } } } - // If there is at least one leaf where this candidate can be added or potentially added in the - // future, add it to storage. 
- if add_to_storage { - match storage.add_candidate(candidate.clone(), pvd, CandidateState::Seconded) { - Ok(_) => {}, - Err(CandidateStorageInsertionError::CandidateAlreadyKnown(_)) => { - gum::debug!( - target: LOG_TARGET, - para = ?para, - "Attempting to introduce an already known candidate: {:?}", - candidate.hash() - ); - // Candidate already known. - let _ = tx.send(true); - return - }, - Err(CandidateStorageInsertionError::PersistedValidationDataMismatch) => { - // We can't log the candidate hash without either doing more ~expensive - // hashing but this branch indicates something is seriously wrong elsewhere - // so it's doubtful that it would affect debugging. - - gum::warn!( - target: LOG_TARGET, - para = ?para, - "Received seconded candidate had mismatching validation data", - ); - // TODO: if this fails, we have an invalid candidate in the unconnected storage. - - let _ = tx.send(false); - return - }, - }; - } else { + if !added { gum::debug!( target: LOG_TARGET, para = ?para, @@ -524,7 +469,7 @@ async fn handle_introduce_seconded_candidate( ); } - let _ = tx.send(add_to_storage); + let _ = tx.send(added); } #[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] @@ -534,51 +479,47 @@ async fn handle_candidate_backed( para: ParaId, candidate_hash: CandidateHash, ) { - let Some(storage) = view.candidate_storage.get_mut(¶) else { - gum::warn!( - target: LOG_TARGET, - para_id = ?para, - ?candidate_hash, - "Received instruction to back a candidate for unscheduled para", - ); - - return - }; - - if !storage.contains(&candidate_hash) { - gum::warn!( - target: LOG_TARGET, - para_id = ?para, - ?candidate_hash, - "Received instruction to back unknown candidate", - ); - - return - } - - if storage.is_backed(&candidate_hash) { - gum::debug!( - target: LOG_TARGET, - para_id = ?para, - ?candidate_hash, - "Received redundant instruction to mark candidate as backed", - ); - - return - } - - storage.mark_backed(&candidate_hash); + // let 
Some(storage) = view.candidate_storage.get_mut(¶) else { + // gum::warn!( + // target: LOG_TARGET, + // para_id = ?para, + // ?candidate_hash, + // "Received instruction to back a candidate for unscheduled para", + // ); + + // return + // }; + + // if !storage.contains(&candidate_hash) { + // gum::warn!( + // target: LOG_TARGET, + // para_id = ?para, + // ?candidate_hash, + // "Received instruction to back unknown candidate", + // ); + + // return + // } + + // if storage.is_backed(&candidate_hash) { + // gum::debug!( + // target: LOG_TARGET, + // para_id = ?para, + // ?candidate_hash, + // "Received redundant instruction to mark candidate as backed", + // ); + + // return + // } + // TODO: add these logs back // Now try repopulating the fragment chains. for leaf_data in view.active_leaves.values_mut() { - if let Some(chain) = leaf_data.fragment_chains.get_mut(¶) { - chain.repopulate_chain(storage, &candidate_hash); - + if let Some(chain) = leaf_data.fragment_chains.remove(¶) { + leaf_data.fragment_chains.insert(para, chain.candidate_backed(&candidate_hash)); // TODO: log here the added candidates: chain.contains_candidate(candidate) } } - - prune_view_candidate_storage(view, storage); } fn answer_get_backable_candidates( @@ -613,25 +554,25 @@ fn answer_get_backable_candidates( return }; - let Some(storage) = view.candidate_storage.get(¶) else { - gum::warn!( - target: LOG_TARGET, - ?relay_parent, - para_id = ?para, - "No candidate storage for active para", - ); - - let _ = tx.send(vec![]); - return - }; - - gum::trace!( - target: LOG_TARGET, - ?relay_parent, - para_id = ?para, - "Candidate storage for para: {:?}", - storage.candidates().map(|candidate| candidate.hash()).collect::>() - ); + // let Some(storage) = view.candidate_storage.get(¶) else { + // gum::warn!( + // target: LOG_TARGET, + // ?relay_parent, + // para_id = ?para, + // "No candidate storage for active para", + // ); + + // let _ = tx.send(vec![]); + // return + // }; + + // gum::trace!( + // 
target: LOG_TARGET, + // ?relay_parent, + // para_id = ?para, + // "Candidate storage for para: {:?}", + // storage.candidates().map(|candidate| candidate.hash()).collect::>() + // ); // TODO: add back these logs gum::trace!( target: LOG_TARGET, @@ -641,26 +582,7 @@ fn answer_get_backable_candidates( chain.to_vec() ); - let backable_candidates: Vec<_> = chain - .find_backable_chain(ancestors.clone(), count) - .into_iter() - .filter_map(|child_hash| { - storage.relay_parent_of_candidate(&child_hash).map_or_else( - || { - // Here, we'd actually need to trim all of the candidates that follow. Or - // not, the runtime will do this. Impossible scenario anyway. - gum::error!( - target: LOG_TARGET, - ?child_hash, - para_id = ?para, - "Candidate is present in fragment chain but not in candidate's storage!", - ); - None - }, - |parent_hash| Some((child_hash, parent_hash)), - ) - }) - .collect(); + let backable_candidates = chain.find_backable_chain(ancestors.clone(), count); if backable_candidates.is_empty() { gum::trace!( @@ -705,9 +627,8 @@ fn answer_hypothetical_membership_request( for &mut (ref candidate, ref mut membership) in &mut response { let para_id = &candidate.candidate_para(); let Some(fragment_chain) = leaf_view.fragment_chains.get(para_id) else { continue }; - let Some(candidate_storage) = view.candidate_storage.get(para_id) else { continue }; - if fragment_chain.hypothetical_membership(candidate.clone(), candidate_storage) { + if fragment_chain.hypothetical_membership(candidate.clone()) { membership.push(*active_leaf); } } @@ -743,19 +664,8 @@ fn answer_prospective_validation_data_request( // 4. Also try to find the relay parent block info by scanning fragment chains. // 5. If head data and relay parent block info are found - success. Otherwise, failure. 
- let storage = match view.candidate_storage.get(&request.para_id) { - None => { - let _ = tx.send(None); - return - }, - Some(s) => s, - }; - let (mut head_data, parent_head_data_hash) = match request.parent_head_data { - ParentHeadData::OnlyHash(parent_head_data_hash) => ( - storage.head_data_by_hash(&parent_head_data_hash).map(|x| x.clone()), - parent_head_data_hash, - ), + ParentHeadData::OnlyHash(parent_head_data_hash) => (None, parent_head_data_hash), ParentHeadData::WithData { head_data, hash } => (Some(head_data), hash), }; @@ -774,10 +684,7 @@ fn answer_prospective_validation_data_request( relay_parent_info = fragment_chain.scope().ancestor(&request.candidate_relay_parent); } if head_data.is_none() { - let required_parent = &fragment_chain.scope().base_constraints().required_parent; - if required_parent.hash() == parent_head_data_hash { - head_data = Some(required_parent.clone()); - } + head_data = fragment_chain.get_head_data_by_hash(&parent_head_data_hash); } if max_pov_size.is_none() { let contains_ancestor = diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs index 722a97989bce..dbdcabeaea7e 100644 --- a/polkadot/node/subsystem-types/src/messages.rs +++ b/polkadot/node/subsystem-types/src/messages.rs @@ -43,13 +43,13 @@ use polkadot_node_primitives::{ }; use polkadot_primitives::{ async_backing, slashing, ApprovalVotingParams, AuthorityDiscoveryId, BackedCandidate, - BlockNumber, CandidateEvent, CandidateHash, CandidateIndex, CandidateReceipt, CollatorId, - CommittedCandidateReceipt, CoreIndex, CoreState, DisputeState, ExecutorParams, GroupIndex, - GroupRotationInfo, Hash, HeadData, Header as BlockHeader, Id as ParaId, InboundDownwardMessage, - InboundHrmpMessage, MultiDisputeStatementSet, NodeFeatures, OccupiedCoreAssumption, - PersistedValidationData, PvfCheckStatement, PvfExecKind, SessionIndex, SessionInfo, - SignedAvailabilityBitfield, SignedAvailabilityBitfields, ValidationCode, 
ValidationCodeHash, - ValidatorId, ValidatorIndex, ValidatorSignature, + BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, CandidateIndex, + CandidateReceipt, CollatorId, CommittedCandidateReceipt, CoreIndex, CoreState, DisputeState, + ExecutorParams, GroupIndex, GroupRotationInfo, Hash, HeadData, Header as BlockHeader, + Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, MultiDisputeStatementSet, + NodeFeatures, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, PvfExecKind, + SessionIndex, SessionInfo, SignedAvailabilityBitfield, SignedAvailabilityBitfields, + ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, }; use polkadot_statement_table::v2::Misbehavior; use std::{ @@ -1116,6 +1116,32 @@ impl HypotheticalCandidate { HypotheticalCandidate::Incomplete { .. } => None, } } + + /// Get the candidate commitments, if the candidate is complete. + pub fn commitments(&self) -> Option<&CandidateCommitments> { + match *self { + HypotheticalCandidate::Complete { ref receipt, .. } => Some(&receipt.commitments), + HypotheticalCandidate::Incomplete { .. } => None, + } + } + + /// Get the persisted validation data, if the candidate is complete. + pub fn persisted_validation_data(&self) -> Option<&PersistedValidationData> { + match *self { + HypotheticalCandidate::Complete { ref persisted_validation_data, .. } => + Some(persisted_validation_data), + HypotheticalCandidate::Incomplete { .. } => None, + } + } + + /// Get the validation code hash, if the candidate is complete. + pub fn validation_code_hash(&self) -> Option<&ValidationCodeHash> { + match *self { + HypotheticalCandidate::Complete { ref receipt, .. } => + Some(&receipt.descriptor.validation_code_hash), + HypotheticalCandidate::Incomplete { .. 
} => None, + } + } } /// Request specifying which candidates are either already included diff --git a/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs b/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs index b5aef325c8b4..fdd5229408da 100644 --- a/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs +++ b/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs @@ -11,6 +11,7 @@ // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. +use polkadot_node_subsystem::messages::HypotheticalCandidate; /// # Overview /// /// A set of utilities for node-side code to emulate the logic the runtime uses for checking @@ -118,8 +119,8 @@ /// in practice at most once every few weeks. use polkadot_primitives::{ async_backing::Constraints as PrimitiveConstraints, BlockNumber, CandidateCommitments, - CollatorId, CollatorSignature, Hash, HeadData, Id as ParaId, PersistedValidationData, - UpgradeRestriction, ValidationCodeHash, + CandidateDescriptor, CandidateHash, CollatorId, CollatorSignature, Hash, HeadData, + Id as ParaId, PersistedValidationData, UpgradeRestriction, ValidationCodeHash, }; use std::{collections::HashMap, sync::Arc}; @@ -708,6 +709,11 @@ impl Fragment { &self.candidate } + /// Get a cheap ref-counted copy of the underlying prospective candidate. + pub fn candidate_clone(&self) -> Arc { + self.candidate.clone() + } + /// Modifications to constraints based on the outputs of the candidate. 
pub fn constraint_modifications(&self) -> &ConstraintModifications { &self.modifications @@ -797,6 +803,58 @@ fn validate_against_constraints( .map_err(FragmentValidityError::OutputsInvalid) } +pub trait HypotheticalOrConcreteCandidate { + fn commitments(&self) -> Option<&CandidateCommitments>; + fn persisted_validation_data(&self) -> Option<&PersistedValidationData>; + fn validation_code_hash(&self) -> Option<&ValidationCodeHash>; + fn parent_head_data_hash(&self) -> Hash; + fn relay_parent(&self) -> Hash; +} + +impl HypotheticalOrConcreteCandidate for HypotheticalCandidate { + fn commitments(&self) -> Option<&CandidateCommitments> { + self.commitments() + } + + fn persisted_validation_data(&self) -> Option<&PersistedValidationData> { + self.persisted_validation_data() + } + + fn validation_code_hash(&self) -> Option<&ValidationCodeHash> { + self.validation_code_hash() + } + + fn parent_head_data_hash(&self) -> Hash { + self.parent_head_data_hash() + } + + fn relay_parent(&self) -> Hash { + self.relay_parent() + } +} + +impl HypotheticalOrConcreteCandidate for ProspectiveCandidate { + fn commitments(&self) -> Option<&CandidateCommitments> { + Some(&self.commitments) + } + + fn persisted_validation_data(&self) -> Option<&PersistedValidationData> { + Some(&self.persisted_validation_data) + } + + fn validation_code_hash(&self) -> Option<&ValidationCodeHash> { + Some(&self.validation_code_hash) + } + + fn parent_head_data_hash(&self) -> Hash { + self.parent_head_data_hash() + } + + fn relay_parent(&self) -> Hash { + self.relay_parent() + } +} + #[cfg(test)] mod tests { use super::*; From 53f93275e9ec29f77dd2a028498cdedc9aa7f615 Mon Sep 17 00:00:00 2001 From: alindima Date: Tue, 2 Jul 2024 17:49:06 +0300 Subject: [PATCH 04/56] some more code --- .../src/fragment_chain/mod.rs | 502 ++++++++++-------- .../core/prospective-parachains/src/lib.rs | 270 ++++++---- .../src/inclusion_emulator/mod.rs | 32 +- 3 files changed, 450 insertions(+), 354 deletions(-) diff --git 
a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs index 04701bdd5492..a6258e4251c3 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs @@ -96,28 +96,52 @@ use std::{ }; use super::LOG_TARGET; -use polkadot_node_subsystem::messages::{Ancestors, HypotheticalCandidate}; +use polkadot_node_subsystem::messages::Ancestors; use polkadot_node_subsystem_util::inclusion_emulator::{ - ConstraintModifications, Constraints, Fragment, HypotheticalOrConcreteCandidate, + self, ConstraintModifications, Constraints, Fragment, HypotheticalOrConcreteCandidate, ProspectiveCandidate, RelayChainBlockInfo, }; use polkadot_primitives::{ - BlockNumber, CandidateHash, CommittedCandidateReceipt, Hash, HeadData, Id as ParaId, - PersistedValidationData, + BlockNumber, CandidateCommitments, CandidateHash, CommittedCandidateReceipt, Hash, HeadData, + Id as ParaId, PersistedValidationData, ValidationCodeHash, }; +use thiserror::Error; const EXTRA_UNCONNECTED_COUNT: usize = 10; -/// Kinds of failures to import a candidate into storage. -#[derive(Debug, Clone, PartialEq)] -pub enum CandidateStorageInsertionError { - /// An error indicating that a supplied candidate didn't match the persisted - /// validation data provided alongside it. - PersistedValidationDataMismatch, - /// The candidate was already known. +/// Fragment chain related errors. +#[derive(Debug, Clone, PartialEq, Error)] +pub(crate) enum Error { + #[error("Candidate already known: {0}")] CandidateAlreadyKnown(CandidateHash), - /// TODO + #[error("Candidate would introduce a zero-length cycle")] + ZeroLengthCycle, + #[error("Candidate would introduce a cycle")] Cycle, + #[error("Candidate would introduce two paths to the same state")] + MultiplePaths, + #[error("Attempting to directly introduce a Backed candidate. 
It should first be introduced as Seconded: {0}")] + IntroduceBackedCandidate(CandidateHash), + #[error("Too many candidates")] + TooManyCandidates, + #[error("RelayParentPrecedesCandidatePendingAvailability")] + RelayParentPrecedesCandidatePendingAvailability, + #[error("ForkWithCandidatePendingAvailability")] + ForkWithCandidatePendingAvailability, + #[error("ForkChoiceRule")] + ForkChoiceRule, + #[error("ParentCandidateNotFound")] + ParentCandidateNotFound, + #[error("ComputeConstraints: {0:?}")] + ComputeConstraints(inclusion_emulator::ModificationError), + #[error("CheckAgainstConstraints: {0:?}")] + CheckAgainstConstraints(inclusion_emulator::FragmentValidityError), + #[error("RelayParentMovedBackwards")] + RelayParentMovedBackwards, + #[error("CandidateEntry: {0}")] + CandidateEntry(#[from] CandidateEntryError), + #[error("RelayParentNotInScope")] + RelayParentNotInScope, } /// Stores candidates and information about them such as their relay-parents and their backing @@ -133,10 +157,7 @@ pub(crate) struct CandidateStorage { // Index from head data hash to candidate hashes outputting that head data. Purely for // efficiency when responding to `ProspectiveValidationDataRequest`s. - // Even though having multiple candidates with same output would be invalid for a parachain, - // it could happen across different relay chain forks. - by_output_head: HashMap>, - // TODO: I don't think we can have multiple candidates with the same output head really. + by_output_head: HashMap, // Index from candidate hash to fragment node. by_candidate_hash: HashMap, @@ -144,84 +165,73 @@ pub(crate) struct CandidateStorage { impl CandidateStorage { /// Introduce a new candidate. 
- pub fn add_candidate( + pub fn add_pending_availability_candidate( &mut self, + candidate_hash: CandidateHash, candidate: CommittedCandidateReceipt, persisted_validation_data: PersistedValidationData, - state: CandidateState, - ) -> Result<(), CandidateStorageInsertionError> { - // Here, do not allow a cycle. - let candidate_hash = candidate.hash(); + ) -> Result<(), Error> { + let entry = CandidateEntry::new( + candidate_hash, + candidate, + persisted_validation_data, + CandidateState::Backed, + )?; + + self.add_candidate_entry(entry) + } + + fn add_candidate_entry(&mut self, candidate: CandidateEntry) -> Result<(), Error> { + let candidate_hash = candidate.candidate_hash; if self.by_candidate_hash.contains_key(&candidate_hash) { - return Err(CandidateStorageInsertionError::CandidateAlreadyKnown(candidate_hash)) + return Err(Error::CandidateAlreadyKnown(candidate_hash)) } - let entry = - CandidateEntry::new(candidate_hash, candidate, persisted_validation_data, state)?; - - // if self.is_cycle(entry.parent_head_data_hash(), entry.output_head_data_hash()) { - // return Err(CandidateStorageInsertionError::Cycle) - // } + self.check_cycles_or_invalid_tree( + &candidate.parent_head_data_hash, + &candidate.output_head_data_hash, + )?; self.by_parent_head - .entry(entry.parent_head_data_hash()) + .entry(candidate.parent_head_data_hash) .or_default() .insert(candidate_hash); - self.by_output_head - .entry(entry.output_head_data_hash()) - .or_default() - .insert(candidate_hash); - // sanity-checked already. 
- self.by_candidate_hash.insert(candidate_hash, entry); + self.by_output_head.insert(candidate.output_head_data_hash, candidate_hash); + self.by_candidate_hash.insert(candidate_hash, candidate); Ok(()) } - fn add_candidate_entry( - &mut self, - candidate: CandidateEntry, - ) -> Result<(), CandidateStorageInsertionError> { - let candidate_hash = candidate.candidate_hash; - if self.by_candidate_hash.contains_key(&candidate_hash) { - return Err(CandidateStorageInsertionError::CandidateAlreadyKnown(candidate_hash)) + fn check_cycles_or_invalid_tree( + &self, + parent_head_hash: &Hash, + output_head_hash: &Hash, + ) -> Result<(), Error> { + // trivial 0-length cycle. + if parent_head_hash == output_head_hash { + return Err(Error::ZeroLengthCycle) } - // if self.is_cycle(candidate.parent_head_data_hash(), candidate.output_head_data_hash()) { - // return Err(CandidateStorageInsertionError::Cycle) - // } - - self.by_parent_head - .entry(candidate.parent_head_data_hash()) - .or_default() - .insert(candidate_hash); - self.by_output_head - .entry(candidate.output_head_data_hash()) - .or_default() - .insert(candidate_hash); - // sanity-checked already. - self.by_candidate_hash.insert(candidate_hash, candidate); + // multiple paths to the same state, which would break the tree + // assumption. + if self.by_output_head.contains_key(output_head_hash) { + return Err(Error::MultiplePaths) + } Ok(()) } /// Remove a candidate from the store. 
- pub fn remove_candidate(&mut self, candidate_hash: &CandidateHash) { + fn remove_candidate(&mut self, candidate_hash: &CandidateHash) { if let Some(entry) = self.by_candidate_hash.remove(candidate_hash) { - if let Entry::Occupied(mut e) = self.by_parent_head.entry(entry.parent_head_data_hash()) - { + if let Entry::Occupied(mut e) = self.by_parent_head.entry(entry.parent_head_data_hash) { e.get_mut().remove(&candidate_hash); if e.get().is_empty() { e.remove(); } } - if let Entry::Occupied(mut e) = self.by_output_head.entry(entry.output_head_data_hash()) - { - e.get_mut().remove(&candidate_hash); - if e.get().is_empty() { - e.remove(); - } - } + self.by_output_head.remove(&entry.output_head_data_hash); } } @@ -238,17 +248,17 @@ impl CandidateStorage { } /// Whether a candidate is contained within the storage already. - pub fn contains(&self, candidate_hash: &CandidateHash) -> bool { + fn contains(&self, candidate_hash: &CandidateHash) -> bool { self.by_candidate_hash.contains_key(candidate_hash) } /// Return an iterator over the stored candidates. - pub fn candidates(&self) -> impl Iterator { + fn candidates(&self) -> impl Iterator { self.by_candidate_hash.values() } /// Get head-data by hash. - pub(crate) fn head_data_by_hash(&self, hash: &Hash) -> Option<&HeadData> { + fn head_data_by_hash(&self, hash: &Hash) -> Option<&HeadData> { // First, search for candidates outputting this head data and extract the head data // from their commitments if they exist. // @@ -256,7 +266,6 @@ impl CandidateStorage { // from their persisted validation data if they exist. 
self.by_output_head .get(hash) - .and_then(|m| m.iter().next()) .and_then(|a_candidate| self.by_candidate_hash.get(a_candidate)) .map(|e| &e.candidate.commitments.head_data) .or_else(|| { @@ -288,7 +297,7 @@ impl CandidateStorage { }) } - pub fn len(&self) -> usize { + fn len(&self) -> usize { self.by_candidate_hash.len() } } @@ -297,13 +306,19 @@ impl CandidateStorage { /// /// Candidates aren't even considered until they've at least been seconded. #[derive(Debug, PartialEq, Clone)] -pub(crate) enum CandidateState { +enum CandidateState { /// The candidate has been seconded. Seconded, /// The candidate has been completely backed by the group. Backed, } +#[derive(Debug, Clone, PartialEq, Error)] +pub enum CandidateEntryError { + #[error("Candidate does not match the persisted validation data provided alongside it")] + PersistedValidationDataMismatch, +} + #[derive(Debug, Clone)] pub(crate) struct CandidateEntry { candidate_hash: CandidateHash, @@ -315,14 +330,22 @@ pub(crate) struct CandidateEntry { } impl CandidateEntry { - pub fn new( + pub fn new_seconded( + candidate_hash: CandidateHash, + candidate: CommittedCandidateReceipt, + persisted_validation_data: PersistedValidationData, + ) -> Result { + Self::new(candidate_hash, candidate, persisted_validation_data, CandidateState::Seconded) + } + + fn new( candidate_hash: CandidateHash, candidate: CommittedCandidateReceipt, persisted_validation_data: PersistedValidationData, state: CandidateState, - ) -> Result { + ) -> Result { if persisted_validation_data.hash() != candidate.descriptor.persisted_validation_data_hash { - return Err(CandidateStorageInsertionError::PersistedValidationDataMismatch) + return Err(CandidateEntryError::PersistedValidationDataMismatch) } Ok(Self { @@ -342,12 +365,38 @@ impl CandidateEntry { }) } - pub fn parent_head_data_hash(&self) -> Hash { + pub fn hash(&self) -> CandidateHash { + self.candidate_hash + } +} + +impl HypotheticalOrConcreteCandidate for &CandidateEntry { + fn 
commitments(&self) -> Option<&CandidateCommitments> { + Some(&self.candidate.commitments) + } + + fn persisted_validation_data(&self) -> Option<&PersistedValidationData> { + Some(&self.candidate.persisted_validation_data) + } + + fn validation_code_hash(&self) -> Option<&ValidationCodeHash> { + Some(&self.candidate.validation_code_hash) + } + + fn parent_head_data_hash(&self) -> Hash { self.parent_head_data_hash } - pub fn output_head_data_hash(&self) -> Hash { - self.output_head_data_hash + fn output_head_data_hash(&self) -> Option { + Some(self.output_head_data_hash) + } + + fn relay_parent(&self) -> Hash { + self.relay_parent + } + + fn candidate_hash(&self) -> CandidateHash { + self.candidate_hash } } @@ -364,8 +413,6 @@ pub(crate) struct PendingAvailability { /// The scope of a [`FragmentChain`]. #[derive(Debug, Clone)] pub(crate) struct Scope { - /// The assigned para id of this `FragmentChain`. - para: ParaId, /// The relay parent we're currently building on top of. relay_parent: RelayChainBlockInfo, /// The other relay parents candidates are allowed to build upon, mapped by the block number. @@ -383,10 +430,14 @@ pub(crate) struct Scope { /// An error variant indicating that ancestors provided to a scope /// had unexpected order. #[derive(Debug)] -pub struct UnexpectedAncestor { +pub(crate) struct UnexpectedAncestor { /// The block number that this error occurred at. + /// Allow as dead code, but it's being read in logs. + #[allow(dead_code)] pub number: BlockNumber, /// The previous seen block number, which did not match `number`. + /// Allow as dead code, but it's being read in logs. + #[allow(dead_code)] pub prev: BlockNumber, } @@ -408,7 +459,6 @@ impl Scope { /// /// It is allowed to provide zero ancestors. 
pub fn with_ancestors( - para: ParaId, relay_parent: RelayChainBlockInfo, base_constraints: Constraints, pending_availability: Vec, @@ -435,7 +485,6 @@ impl Scope { } Ok(Scope { - para, relay_parent, base_constraints, pending_availability, @@ -464,7 +513,7 @@ impl Scope { } /// Whether the candidate in question is one pending availability in this scope. - pub fn get_pending_availability( + fn get_pending_availability( &self, candidate_hash: &CandidateHash, ) -> Option<&PendingAvailability> { @@ -513,15 +562,6 @@ pub(crate) struct FragmentChain { impl FragmentChain { /// Create a new [`FragmentChain`] with given scope and populated from the storage. pub fn populate(scope: Scope, parent_candidates: &mut CandidateStorage) -> Self { - gum::trace!( - target: LOG_TARGET, - relay_parent = ?scope.relay_parent.hash, - relay_parent_num = scope.relay_parent.number, - para_id = ?scope.para, - ancestors = scope.ancestors.len(), - "Instantiating Fragment Chain", - ); - let mut fragment_chain = Self { scope, chain: Vec::new(), @@ -547,83 +587,50 @@ impl FragmentChain { } /// Returns the number of candidates in the chain - pub(crate) fn len(&self) -> usize { + pub fn len(&self) -> usize { self.candidates.len() } /// Whether the candidate exists. - pub(crate) fn contains_candidate(&self, candidate: &CandidateHash) -> bool { + pub fn contains_candidate(&self, candidate: &CandidateHash) -> bool { self.candidates.contains(candidate) } + /// Whether the candidate exists. + pub fn contains_unconnected_candidate(&self, candidate: &CandidateHash) -> bool { + self.unconnected.contains(candidate) + } + /// Return a vector of the chain's candidate hashes, in-order. - pub(crate) fn to_vec(&self) -> Vec { + pub fn to_vec(&self) -> Vec { self.chain.iter().map(|candidate| candidate.candidate_hash).collect() } + /// Return a vector of the chain's candidate hashes, in-order. 
+ pub fn unconnected(&self) -> impl Iterator { + self.unconnected.candidates() + } + pub fn as_candidate_storage(&self) -> CandidateStorage { let mut storage = self.unconnected.clone(); for candidate in self.chain.iter() { - let _ = storage.add_candidate_entry(CandidateEntry { + let Ok(()) = storage.add_candidate_entry(CandidateEntry { candidate_hash: candidate.candidate_hash, parent_head_data_hash: candidate.parent_head_data_hash, output_head_data_hash: candidate.output_head_data_hash, relay_parent: candidate.relay_parent(), candidate: candidate.fragment.candidate_clone(), // This clone is very cheap. state: CandidateState::Backed, - }); // TODO: is it really ok to hide the error here? + }) else { + continue + }; } storage } - /// Returns the hypothetical state of a candidate with the given hash and parent head data - /// in regards to the existing chain. - /// - /// Returns true if either: - /// - the candidate is already present - /// - the candidate can be added to the chain - /// - the candidate could potentially be added to the chain in the future (its ancestors are - /// still unknown but it doesn't violate other rules). - /// - /// If this returns false, the candidate could never be added to the current chain (not now, not - /// ever) - pub(crate) fn hypothetical_membership(&self, candidate: HypotheticalCandidate) -> bool { - if let Some(output_head_hash) = candidate.output_head_data_hash() { - if self.is_cycle(candidate.parent_head_data_hash(), output_head_hash) { - return false; - } - } - - let candidate_hash = candidate.candidate_hash(); - - // If we've already used this candidate in the chain or in the unconnected storage. - if self.candidates.contains(&candidate_hash) || self.unconnected.contains(&candidate_hash) { - return true - } - - self.can_add_candidate_as_potential(&candidate_hash, &candidate) - } - - // TODO: this needs modifying. - fn is_cycle(&self, parent_head_hash: Hash, output_head_hash: Hash) -> bool { - // trivial 0-length cycle. 
- if parent_head_hash == output_head_hash { - return true - } - - // this should catch any other cycles. our output state cannot already be the parent - // state of another candidate, unless this is a cycle, since the already added - // candidates form a chain. - if self.by_parent_head.contains_key(&output_head_hash) { - return true - } - - false - } - - pub(crate) fn get_head_data_by_hash(&self, head_data_hash: &Hash) -> Option { + pub fn get_head_data_by_hash(&self, head_data_hash: &Hash) -> Option { let required_parent = &self.scope.base_constraints().required_parent; if &required_parent.hash() == head_data_hash { return Some(required_parent.clone()) @@ -662,7 +669,7 @@ impl FragmentChain { /// The intention of the `ancestors` is to allow queries on the basis of /// one or more candidates which were previously pending availability becoming /// available or candidates timing out. - pub(crate) fn find_backable_chain( + pub fn find_backable_chain( &self, ancestors: Ancestors, count: u32, @@ -741,40 +748,40 @@ impl FragmentChain { // present in the `CandidateStorage`. // Even if the candidate is a potential candidate, this function will indicate that it can be // kept only if there's enough room for it. - fn can_add_candidate_as_potential( + pub fn can_add_candidate_as_potential( &self, - candidate_hash: &CandidateHash, candidate: &impl HypotheticalOrConcreteCandidate, - ) -> bool { - if !self.check_potential(candidate_hash, candidate) { - return false + ) -> Result<(), Error> { + let candidate_hash = candidate.candidate_hash(); + if self.candidates.contains(&candidate_hash) || self.unconnected.contains(&candidate_hash) { + return Err(Error::CandidateAlreadyKnown(candidate_hash)) } - // TODO: chain len < max_depth OR unconnected < EXTRA_UNCONNECTED_COUNT. we shouldn't mix - // the two?? NO. here we should have the max number of candidates in the tree! 
- (self.chain.len() + self.unconnected.by_candidate_hash.len()) <= + if (self.chain.len() + self.unconnected.len()) > (self.scope.max_depth + EXTRA_UNCONNECTED_COUNT) + { + return Err(Error::TooManyCandidates) + } + + self.check_potential(candidate) } - pub(crate) fn try_adding_seconded_candidate( + pub fn try_adding_seconded_candidate( &mut self, candidate: &CandidateEntry, - ) -> Result { - if self.candidates.contains(&candidate.candidate_hash) || - self.unconnected.contains(&candidate.candidate_hash) - { - return Err(CandidateStorageInsertionError::CandidateAlreadyKnown( - candidate.candidate_hash, - )) + ) -> Result<(), Error> { + if candidate.state == CandidateState::Backed { + return Err(Error::IntroduceBackedCandidate(candidate.candidate_hash)); } - if self.can_add_candidate_as_potential(&candidate.candidate_hash, &*candidate.candidate) { + let res = self.can_add_candidate_as_potential(&candidate); + + if res.is_ok() { // This clone is cheap, as it uses an Arc for the expensive stuff. self.unconnected.add_candidate_entry(candidate.clone())?; - Ok(true) - } else { - Ok(false) } + + res } // The candidates which are present in `CandidateStorage`, are not part of this chain but could @@ -788,22 +795,52 @@ impl FragmentChain { if self.scope.get_pending_availability(&candidate.candidate_hash).is_some() { continue } - // We stop at max_depth + 1 with the search. There's no point in looping further. - if (self.chain.len() + self.unconnected.len()) > - (self.scope.max_depth + EXTRA_UNCONNECTED_COUNT) - { - break - } - if !self.candidates.contains(&candidate.candidate_hash) && - self.check_potential(&candidate.candidate_hash, &*candidate.candidate) - { - // This is cheap because the expensive stuff is wrapped in an Arc - let _ = self.unconnected.add_candidate_entry(candidate.clone()); // TODO: is it ok to hide - // result here? 
- } + + let res = self.can_add_candidate_as_potential(&candidate); + + match res { + Err(Error::TooManyCandidates) => break, + Ok(()) => { + // This clone is cheap because the expensive stuff is wrapped in an Arc + let Ok(()) = self.unconnected.add_candidate_entry(candidate.clone()) else { + continue + }; + }, + // Swallow these errors as they can legitimately happen when pruning stale + // candidates. + Err(_) => {}, + }; } } + fn check_cycles_or_invalid_tree( + &self, + parent_head_hash: &Hash, + output_head_hash: &Hash, + ) -> Result<(), Error> { + self.unconnected + .check_cycles_or_invalid_tree(parent_head_hash, output_head_hash)?; + + // trivial 0-length cycle. + if parent_head_hash == output_head_hash { + return Err(Error::ZeroLengthCycle) + } + + // this should catch a cycle where this candidate would point back to the parent of some + // candidate in the chain. + if self.by_parent_head.contains_key(output_head_hash) { + return Err(Error::Cycle) + } + + // multiple paths to the same state, which would break the tree + // assumption. + if self.by_output_head.contains_key(output_head_hash) { + return Err(Error::MultiplePaths) + } + + Ok(()) + } + // Checks the potential of a candidate to be added to the chain in the future. // Verifies that the relay parent is in scope and not moving backwards and that we're not // introducing forks or cycles with other candidates in the chain. @@ -811,52 +848,57 @@ impl FragmentChain { // collation. 
fn check_potential( &self, - candidate_hash: &CandidateHash, candidate: &impl HypotheticalOrConcreteCandidate, - ) -> bool { + ) -> Result<(), Error> { let relay_parent = candidate.relay_parent(); let parent_head_hash = candidate.parent_head_data_hash(); - let Some(relay_parent) = self.scope.ancestor(&relay_parent) else { return false }; + let Some(relay_parent) = self.scope.ancestor(&relay_parent) else { + return Err(Error::RelayParentNotInScope) + }; let earliest_rp = self.earliest_relay_parent_pending_availability(); if relay_parent.number < earliest_rp.number { - return false // relay parent moved backwards. + return Err(Error::RelayParentPrecedesCandidatePendingAvailability) // relay parent moved + // backwards. } - let constraints = if let Some(parent_candidate) = self.by_output_head.get(&parent_head_hash) - { - // Check if it's a fork. - if let Some(other_candidate) = self.by_parent_head.get(&parent_head_hash) { - if self.scope().get_pending_availability(other_candidate).is_some() { - // Cannot accept a fork with a candidate pending availability. - return false - } + // Check if it's a fork with a backed candidate. + if let Some(other_candidate) = self.by_parent_head.get(&parent_head_hash) { + if self.scope().get_pending_availability(other_candidate).is_some() { + // Cannot accept a fork with a candidate pending availability. + return Err(Error::ForkWithCandidatePendingAvailability) + } - // If the candidate is backed and in the current chain, accept only a candidate with - // a lower hash. - if other_candidate < candidate_hash { - return false - } + // If the candidate is backed and in the current chain, accept only a candidate with + // a lower hash. + if other_candidate < &candidate.candidate_hash() { + return Err(Error::ForkChoiceRule) } + } + + // Check for cycles or invalid tree transitions. 
+ if let Some(ref output_head_hash) = candidate.output_head_data_hash() { + self.check_cycles_or_invalid_tree(&parent_head_hash, output_head_hash)?; + } + let constraints = if let Some(parent_candidate) = self.by_output_head.get(&parent_head_hash) + { let Some(parent_candidate) = self.chain.iter().find(|c| &c.candidate_hash == parent_candidate) else { - return false + return Err(Error::ParentCandidateNotFound) }; - let Ok(constraints) = self - .scope + self.scope .base_constraints .apply_modifications(&parent_candidate.cumulative_modifications) - else { - return false - }; - constraints + .map_err(Error::ComputeConstraints)? // Check if it builds on the latest included candidate } else if self.scope.base_constraints.required_parent.hash() == parent_head_hash { self.scope.base_constraints.clone() } else { - return true + // If the parent is not yet part of the chain, there's nothing else we can check for + // now. + return Ok(()) }; // We do additional checks for complete candidates. @@ -865,42 +907,48 @@ impl FragmentChain { candidate.persisted_validation_data(), candidate.validation_code_hash(), ) { - if Fragment::check_against_constraints( + Fragment::check_against_constraints( &relay_parent, &constraints, commitments, validation_code_hash, pvd, ) - .is_err() - { - gum::debug!( - target: LOG_TARGET, - "Fragment::check_against_constraints() returned error", - ); - return false - } - } else { - // Otherwise, at least check the relay parent progresses. - return relay_parent.number >= constraints.min_relay_parent_number + .map_err(Error::CheckAgainstConstraints)?; + // Otherwise, at least check the relay parent progresses. 
+ } else if relay_parent.number < constraints.min_relay_parent_number { + return Err(Error::RelayParentMovedBackwards) } - true + Ok(()) } fn trim_uneligible_forks(&self, storage: &mut CandidateStorage) { - let mut queue: VecDeque<_> = self.chain.iter().map(|c| c.parent_head_data_hash).collect(); + let mut queue: VecDeque<_> = + self.chain.iter().map(|c| (c.parent_head_data_hash, true)).collect(); + let mut visited = HashSet::new(); + + while let Some((parent, parent_has_potential)) = queue.pop_front() { + visited.insert(parent); - // TODO: if there's a cycle or multiple paths to the same node, this will loop forever. - while let Some(parent) = queue.pop_front() { let Some(children) = storage.by_parent_head.get(&parent) else { continue }; let mut to_remove = vec![]; for child_hash in children.iter() { let Some(child) = storage.by_candidate_hash.get(child_hash) else { continue }; - if !self.check_potential(child_hash, &*child.candidate) { + + // Detected a cycle. Stop now to avoid looping forever. + // Remove the candidate that creates the cycle. + if visited.contains(&child.output_head_data_hash) { + to_remove.push(*child_hash); + continue + } + + if parent_has_potential && self.check_potential(&child).is_ok() { + queue.push_back((child.output_head_data_hash, true)); + } else { to_remove.push(*child_hash); - queue.push_back(child.output_head_data_hash); + queue.push_back((child.output_head_data_hash, false)); } } @@ -964,6 +1012,16 @@ impl FragmentChain { return None }; + if self + .check_cycles_or_invalid_tree( + &candidate.parent_head_data_hash, + &candidate.output_head_data_hash, + ) + .is_err() + { + return None + } + // require: candidates don't move backwards // and only pending availability candidates can be out-of-scope. // @@ -1071,8 +1129,6 @@ impl FragmentChain { return self } - // TODO: if is already in chain, it's an error. 
- let mut old_storage = self.as_candidate_storage(); Self::populate(self.scope, &mut old_storage) } diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index 92d55e91fa20..95b7b444e3ff 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -28,7 +28,6 @@ use std::collections::{HashMap, HashSet}; -use fragment_chain::{CandidateEntry, FragmentChain}; use futures::{channel::oneshot, prelude::*}; use polkadot_node_subsystem::{ @@ -54,7 +53,9 @@ use polkadot_primitives::{ use crate::{ error::{FatalError, FatalResult, JfyiError, JfyiErrorResult, Result}, - fragment_chain::{CandidateState, CandidateStorageInsertionError, Scope as FragmentChainScope}, + fragment_chain::{ + CandidateEntry, Error as FragmentChainError, FragmentChain, Scope as FragmentChainScope, + }, }; mod error; @@ -264,14 +265,14 @@ async fn handle_active_leaves_update( for c in pending_availability { let candidate_hash = c.compact.candidate_hash; - let res = prev_candidate_storage.add_candidate( + let res = prev_candidate_storage.add_pending_availability_candidate( + candidate_hash, c.candidate, c.persisted_validation_data, - CandidateState::Backed, ); match res { - Ok(_) | Err(CandidateStorageInsertionError::CandidateAlreadyKnown(_)) => {}, + Ok(_) | Err(FragmentChainError::CandidateAlreadyKnown(_)) => {}, Err(err) => { gum::warn!( target: LOG_TARGET, @@ -288,22 +289,34 @@ async fn handle_active_leaves_update( compact_pending.push(c.compact); } - let scope = FragmentChainScope::with_ancestors( - para, + let scope = match FragmentChainScope::with_ancestors( block_info.clone(), constraints, compact_pending, max_candidate_depth, ancestry.iter().cloned(), - ) - .expect("ancestors are provided in reverse order and correctly; qed"); - // TODO: let's not panic here. 
+ ) { + Ok(scope) => scope, + Err(unexpected_ancestors) => { + gum::warn!( + target: LOG_TARGET, + para_id = ?para, + max_candidate_depth, + ?ancestry, + leaf = ?hash, + "Relay chain ancestors have wrong order: {:?}", + unexpected_ancestors + ); + continue + }, + }; gum::trace!( target: LOG_TARGET, relay_parent = ?hash, min_relay_parent = scope.earliest_relay_parent().number, para_id = ?para, + ancestors = ?ancestry, "Creating fragment chain" ); @@ -313,8 +326,17 @@ async fn handle_active_leaves_update( target: LOG_TARGET, relay_parent = ?hash, para_id = ?para, - "Populated fragment chain with {} candidates", - chain.len() + "Populated fragment chain with {} candidates: {:?}", + chain.len(), + chain.to_vec() + ); + + gum::trace!( + target: LOG_TARGET, + relay_parent = ?hash, + para_id = ?para, + "Potential candidate storage for para: {:?}", + chain.unconnected().map(|candidate| candidate.hash()).collect::>() ); fragment_chains.insert(para, chain); @@ -333,7 +355,7 @@ async fn handle_active_leaves_update( struct ImportablePendingAvailability { candidate: CommittedCandidateReceipt, persisted_validation_data: PersistedValidationData, - compact: crate::fragment_chain::PendingAvailability, + compact: fragment_chain::PendingAvailability, } #[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] @@ -376,7 +398,7 @@ async fn preprocess_candidates_pending_availability( relay_parent_number: relay_parent.number, relay_parent_storage_root: relay_parent.storage_root, }, - compact: crate::fragment_chain::PendingAvailability { + compact: fragment_chain::PendingAvailability { candidate_hash: pending.candidate_hash, relay_parent, }, @@ -404,62 +426,72 @@ async fn handle_introduce_seconded_candidate( persisted_validation_data: pvd, } = request; - // let Some(storage) = view.candidate_storage.get_mut(¶) else { - // gum::warn!( - // target: LOG_TARGET, - // para_id = ?para, - // ?candidate_hash, - // "Received seconded candidate for inactive para", - // ); - - // 
let _ = tx.send(false); - // return - // }; // TODO: add this log somehow - let candidate_hash = candidate.hash(); - let Ok(candidate_entry) = - CandidateEntry::new(candidate_hash, candidate, pvd, CandidateState::Seconded) - else { - // TODO: what if we add more error variants here?. replace this with a match. - gum::warn!( - target: LOG_TARGET, - para = ?para, - "Received seconded candidate had mismatching validation data", - ); + let candidate_entry = match CandidateEntry::new_seconded(candidate_hash, candidate, pvd) { + Ok(candidate) => candidate, + Err(err) => { + gum::warn!( + target: LOG_TARGET, + para = ?para, + "Cannot add seconded candidate: {}", + err + ); - let _ = tx.send(false); - return + let _ = tx.send(false); + return + }, }; let mut added = false; - for leaf_data in view.active_leaves.values_mut() { + let mut para_scheduled = false; + for (leaf, leaf_data) in view.active_leaves.iter_mut() { if let Some(chain) = leaf_data.fragment_chains.get_mut(¶) { + para_scheduled = true; + match chain.try_adding_seconded_candidate(&candidate_entry) { - Ok(true) => { + Ok(()) => { + gum::debug!( + target: LOG_TARGET, + para = ?para, + relay_parent = ?leaf, + "Added seconded candidate {:?}", + candidate_hash + ); added = true; }, - Err(CandidateStorageInsertionError::CandidateAlreadyKnown(_)) => { + Err(FragmentChainError::CandidateAlreadyKnown(_)) => { gum::debug!( target: LOG_TARGET, para = ?para, + relay_parent = ?leaf, "Attempting to introduce an already known candidate: {:?}", candidate_hash ); added = true; }, - Err(CandidateStorageInsertionError::PersistedValidationDataMismatch) => { - // We already checked for this. 
- let _ = tx.send(false); - return - }, - // TODO: log - Ok(false) => {}, - Err(CandidateStorageInsertionError::Cycle) => { // TODO: log + Err(err) => { + gum::debug!( + target: LOG_TARGET, + para = ?para, + relay_parent = ?leaf, + ?candidate_hash, + "Cannot introduce seconded candidate: {}", + err + ) }, } } } + if !para_scheduled { + gum::warn!( + target: LOG_TARGET, + para_id = ?para, + ?candidate_hash, + "Received seconded candidate for inactive para", + ); + } + if !added { gum::debug!( target: LOG_TARGET, @@ -479,47 +511,66 @@ async fn handle_candidate_backed( para: ParaId, candidate_hash: CandidateHash, ) { - // let Some(storage) = view.candidate_storage.get_mut(¶) else { - // gum::warn!( - // target: LOG_TARGET, - // para_id = ?para, - // ?candidate_hash, - // "Received instruction to back a candidate for unscheduled para", - // ); - - // return - // }; - - // if !storage.contains(&candidate_hash) { - // gum::warn!( - // target: LOG_TARGET, - // para_id = ?para, - // ?candidate_hash, - // "Received instruction to back unknown candidate", - // ); - - // return - // } - - // if storage.is_backed(&candidate_hash) { - // gum::debug!( - // target: LOG_TARGET, - // para_id = ?para, - // ?candidate_hash, - // "Received redundant instruction to mark candidate as backed", - // ); - - // return - // } - // TODO: add these logs back - - // Now try repopulating the fragment chains. - for leaf_data in view.active_leaves.values_mut() { + // Repopulate the fragment chains. 
+ let mut found_candidate = false; + let mut found_para = false; + for (leaf, leaf_data) in view.active_leaves.iter_mut() { if let Some(chain) = leaf_data.fragment_chains.remove(¶) { - leaf_data.fragment_chains.insert(para, chain.candidate_backed(&candidate_hash)); - // TODO: log here the added candidates: chain.contains_candidate(candidate) + found_para = true; + if chain.contains_candidate(&candidate_hash) { + gum::debug!( + target: LOG_TARGET, + para_id = ?para, + ?candidate_hash, + "Received redundant instruction to mark as backed an already backed candidate", + ); + found_candidate = true; + } + + if chain.contains_unconnected_candidate(&candidate_hash) { + found_candidate = true; + let new_chain = chain.candidate_backed(&candidate_hash); + + gum::trace!( + target: LOG_TARGET, + relay_parent = ?leaf, + para_id = ?para, + "Candidate chain for para: {:?}", + new_chain.to_vec() + ); + + gum::trace!( + target: LOG_TARGET, + relay_parent = ?leaf, + para_id = ?para, + "Potential candidate storage for para: {:?}", + new_chain.unconnected().map(|candidate| candidate.hash()).collect::>() + ); + + leaf_data.fragment_chains.insert(para, new_chain); + } } } + + if !found_para { + gum::warn!( + target: LOG_TARGET, + para_id = ?para, + ?candidate_hash, + "Received instruction to back a candidate for unscheduled para", + ); + + return + } + + if !found_candidate { + gum::warn!( + target: LOG_TARGET, + para_id = ?para, + ?candidate_hash, + "Received instruction to back unknown candidate", + ); + } } fn answer_get_backable_candidates( @@ -554,26 +605,6 @@ fn answer_get_backable_candidates( return }; - // let Some(storage) = view.candidate_storage.get(¶) else { - // gum::warn!( - // target: LOG_TARGET, - // ?relay_parent, - // para_id = ?para, - // "No candidate storage for active para", - // ); - - // let _ = tx.send(vec![]); - // return - // }; - - // gum::trace!( - // target: LOG_TARGET, - // ?relay_parent, - // para_id = ?para, - // "Candidate storage for para: {:?}", - // 
storage.candidates().map(|candidate| candidate.hash()).collect::>() - // ); // TODO: add back these logs - gum::trace!( target: LOG_TARGET, ?relay_parent, @@ -582,6 +613,14 @@ fn answer_get_backable_candidates( chain.to_vec() ); + gum::trace!( + target: LOG_TARGET, + ?relay_parent, + para_id = ?para, + "Potential candidate storage for para: {:?}", + chain.unconnected().map(|candidate| candidate.hash()).collect::>() + ); + let backable_candidates = chain.find_backable_chain(ancestors.clone(), count); if backable_candidates.is_empty() { @@ -628,9 +667,22 @@ fn answer_hypothetical_membership_request( let para_id = &candidate.candidate_para(); let Some(fragment_chain) = leaf_view.fragment_chains.get(para_id) else { continue }; - if fragment_chain.hypothetical_membership(candidate.clone()) { - membership.push(*active_leaf); - } + let res = fragment_chain.can_add_candidate_as_potential(candidate); + match res { + Err(FragmentChainError::CandidateAlreadyKnown(_)) | Ok(()) => { + membership.push(*active_leaf); + }, + Err(err) => { + gum::debug!( + target: LOG_TARGET, + para = ?para_id, + leaf = ?active_leaf, + candidate = ?candidate.candidate_hash(), + "Candidate is not a hypothetical member: {:?}", + err + ) + }, + }; } } diff --git a/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs b/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs index fdd5229408da..666f2df11aa8 100644 --- a/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs +++ b/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs @@ -119,8 +119,8 @@ use polkadot_node_subsystem::messages::HypotheticalCandidate; /// in practice at most once every few weeks. 
use polkadot_primitives::{ async_backing::Constraints as PrimitiveConstraints, BlockNumber, CandidateCommitments, - CandidateDescriptor, CandidateHash, CollatorId, CollatorSignature, Hash, HeadData, - Id as ParaId, PersistedValidationData, UpgradeRestriction, ValidationCodeHash, + CandidateHash, CollatorId, CollatorSignature, Hash, HeadData, Id as ParaId, + PersistedValidationData, UpgradeRestriction, ValidationCodeHash, }; use std::{collections::HashMap, sync::Arc}; @@ -808,7 +808,9 @@ pub trait HypotheticalOrConcreteCandidate { fn persisted_validation_data(&self) -> Option<&PersistedValidationData>; fn validation_code_hash(&self) -> Option<&ValidationCodeHash>; fn parent_head_data_hash(&self) -> Hash; + fn output_head_data_hash(&self) -> Option; fn relay_parent(&self) -> Hash; + fn candidate_hash(&self) -> CandidateHash; } impl HypotheticalOrConcreteCandidate for HypotheticalCandidate { @@ -828,31 +830,17 @@ impl HypotheticalOrConcreteCandidate for HypotheticalCandidate { self.parent_head_data_hash() } - fn relay_parent(&self) -> Hash { - self.relay_parent() - } -} - -impl HypotheticalOrConcreteCandidate for ProspectiveCandidate { - fn commitments(&self) -> Option<&CandidateCommitments> { - Some(&self.commitments) - } - - fn persisted_validation_data(&self) -> Option<&PersistedValidationData> { - Some(&self.persisted_validation_data) - } - - fn validation_code_hash(&self) -> Option<&ValidationCodeHash> { - Some(&self.validation_code_hash) - } - - fn parent_head_data_hash(&self) -> Hash { - self.parent_head_data_hash() + fn output_head_data_hash(&self) -> Option { + self.output_head_data_hash() } fn relay_parent(&self) -> Hash { self.relay_parent() } + + fn candidate_hash(&self) -> CandidateHash { + self.candidate_hash() + } } #[cfg(test)] From 67d6887d725f4981fdaafc270c3e2260372084cc Mon Sep 17 00:00:00 2001 From: alindima Date: Wed, 3 Jul 2024 17:38:40 +0300 Subject: [PATCH 05/56] bugfixes --- .gitlab/pipeline/check.yml | 2 +- .gitlab/pipeline/test.yml | 18 +-- 
.../src/fragment_chain/mod.rs | 115 +++++++++--------- .../core/prospective-parachains/src/lib.rs | 44 ++++++- polkadot/node/core/provisioner/src/lib.rs | 6 +- .../statement-distribution/src/v2/mod.rs | 4 +- 6 files changed, 110 insertions(+), 79 deletions(-) diff --git a/.gitlab/pipeline/check.yml b/.gitlab/pipeline/check.yml index 2b8b90ef19a4..53bd8419dbc1 100644 --- a/.gitlab/pipeline/check.yml +++ b/.gitlab/pipeline/check.yml @@ -3,7 +3,7 @@ cargo-clippy: extends: - .docker-env - .common-refs - - .pipeline-stopper-artifacts + # - .pipeline-stopper-artifacts variables: RUSTFLAGS: "-D warnings" script: diff --git a/.gitlab/pipeline/test.yml b/.gitlab/pipeline/test.yml index d171a8a19426..518953e5e288 100644 --- a/.gitlab/pipeline/test.yml +++ b/.gitlab/pipeline/test.yml @@ -32,7 +32,7 @@ codecov-start: extends: - .kubernetes-env - .common-refs - - .pipeline-stopper-artifacts + # - .pipeline-stopper-artifacts - .run-immediately script: - !reference [.codecov-check, script] @@ -53,7 +53,7 @@ codecov-finish: extends: - .kubernetes-env - .common-refs - - .pipeline-stopper-artifacts + # - .pipeline-stopper-artifacts needs: - test-linux-stable-codecov script: @@ -72,7 +72,7 @@ test-linux-stable-codecov: extends: - .docker-env - .common-refs - - .pipeline-stopper-artifacts + # - .pipeline-stopper-artifacts variables: CI_IMAGE: europe-docker.pkg.dev/parity-build/ci-images/ci-unified:bullseye-1.77.0 RUST_TOOLCHAIN: stable @@ -118,7 +118,7 @@ test-linux-stable: - .docker-env - .common-refs - .run-immediately - - .pipeline-stopper-artifacts + # - .pipeline-stopper-artifacts variables: RUST_TOOLCHAIN: stable # Enable debug assertions since we are running optimized builds for testing @@ -167,7 +167,7 @@ test-linux-stable-runtime-benchmarks: - .docker-env - .common-refs - .run-immediately - - .pipeline-stopper-artifacts + # - .pipeline-stopper-artifacts variables: RUST_TOOLCHAIN: stable # Enable debug assertions since we are running optimized builds for testing @@ -303,7 
+303,7 @@ cargo-check-benches: - .common-refs - .run-immediately - .collect-artifacts - - .pipeline-stopper-artifacts + # - .pipeline-stopper-artifacts before_script: # TODO: DON'T FORGET TO CHANGE FOR PROD VALUES!!! # merges in the master branch on PRs. skip if base is not master @@ -463,7 +463,7 @@ test-linux-stable-int: - .docker-env - .common-refs - .run-immediately - - .pipeline-stopper-artifacts + # - .pipeline-stopper-artifacts variables: # Enable debug assertions since we are running optimized builds for testing # but still want to have debug assertions. @@ -485,7 +485,7 @@ check-tracing: - .docker-env - .common-refs - .run-immediately - - .pipeline-stopper-artifacts + # - .pipeline-stopper-artifacts script: # with-tracing must be explicitly activated, we run a test to ensure this works as expected in both cases - time cargo test --locked --manifest-path ./substrate/primitives/tracing/Cargo.toml --no-default-features @@ -498,7 +498,7 @@ check-metadata-hash: - .docker-env - .common-refs - .run-immediately - - .pipeline-stopper-artifacts + # - .pipeline-stopper-artifacts script: - time cargo build --locked -p westend-runtime --features metadata-hash diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs index a6258e4251c3..1a72bd6c522c 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs @@ -103,7 +103,7 @@ use polkadot_node_subsystem_util::inclusion_emulator::{ }; use polkadot_primitives::{ BlockNumber, CandidateCommitments, CandidateHash, CommittedCandidateReceipt, Hash, HeadData, - Id as ParaId, PersistedValidationData, ValidationCodeHash, + PersistedValidationData, ValidationCodeHash, }; use thiserror::Error; @@ -114,6 +114,8 @@ const EXTRA_UNCONNECTED_COUNT: usize = 10; pub(crate) enum Error { #[error("Candidate already known: {0}")] 
CandidateAlreadyKnown(CandidateHash), + #[error("Candidate already pending availability: {0}")] + CandidateAlreadyPendingAvailability(CandidateHash), #[error("Candidate would introduce a zero-length cycle")] ZeroLengthCycle, #[error("Candidate would introduce a cycle")] @@ -157,7 +159,7 @@ pub(crate) struct CandidateStorage { // Index from head data hash to candidate hashes outputting that head data. Purely for // efficiency when responding to `ProspectiveValidationDataRequest`s. - by_output_head: HashMap, + by_output_head: HashMap>, // Index from candidate hash to fragment node. by_candidate_hash: HashMap, @@ -181,46 +183,25 @@ impl CandidateStorage { self.add_candidate_entry(entry) } - fn add_candidate_entry(&mut self, candidate: CandidateEntry) -> Result<(), Error> { + pub fn add_candidate_entry(&mut self, candidate: CandidateEntry) -> Result<(), Error> { let candidate_hash = candidate.candidate_hash; if self.by_candidate_hash.contains_key(&candidate_hash) { return Err(Error::CandidateAlreadyKnown(candidate_hash)) } - self.check_cycles_or_invalid_tree( - &candidate.parent_head_data_hash, - &candidate.output_head_data_hash, - )?; - self.by_parent_head .entry(candidate.parent_head_data_hash) .or_default() .insert(candidate_hash); - self.by_output_head.insert(candidate.output_head_data_hash, candidate_hash); + self.by_output_head + .entry(candidate.output_head_data_hash) + .or_default() + .insert(candidate_hash); self.by_candidate_hash.insert(candidate_hash, candidate); Ok(()) } - fn check_cycles_or_invalid_tree( - &self, - parent_head_hash: &Hash, - output_head_hash: &Hash, - ) -> Result<(), Error> { - // trivial 0-length cycle. - if parent_head_hash == output_head_hash { - return Err(Error::ZeroLengthCycle) - } - - // multiple paths to the same state, which would break the tree - // assumption. - if self.by_output_head.contains_key(output_head_hash) { - return Err(Error::MultiplePaths) - } - - Ok(()) - } - /// Remove a candidate from the store. 
fn remove_candidate(&mut self, candidate_hash: &CandidateHash) { if let Some(entry) = self.by_candidate_hash.remove(candidate_hash) { @@ -231,7 +212,12 @@ impl CandidateStorage { } } - self.by_output_head.remove(&entry.output_head_data_hash); + if let Entry::Occupied(mut e) = self.by_output_head.entry(entry.output_head_data_hash) { + e.get_mut().remove(&candidate_hash); + if e.get().is_empty() { + e.remove(); + } + } } } @@ -257,6 +243,11 @@ impl CandidateStorage { self.by_candidate_hash.values() } + /// Return an iterator over the stored candidates. + pub fn into_candidates(self) -> impl Iterator { + self.by_candidate_hash.into_values() + } + /// Get head-data by hash. fn head_data_by_hash(&self, hash: &Hash) -> Option<&HeadData> { // First, search for candidates outputting this head data and extract the head data @@ -266,6 +257,7 @@ impl CandidateStorage { // from their persisted validation data if they exist. self.by_output_head .get(hash) + .and_then(|m| m.iter().next()) .and_then(|a_candidate| self.by_candidate_hash.get(a_candidate)) .map(|e| &e.candidate.commitments.head_data) .or_else(|| { @@ -317,6 +309,8 @@ enum CandidateState { pub enum CandidateEntryError { #[error("Candidate does not match the persisted validation data provided alongside it")] PersistedValidationDataMismatch, + #[error("Candidate is a zero-length cycle")] + ZeroLengthCycle, } #[derive(Debug, Clone)] @@ -348,10 +342,17 @@ impl CandidateEntry { return Err(CandidateEntryError::PersistedValidationDataMismatch) } + let parent_head_data_hash = persisted_validation_data.parent_head.hash(); + let output_head_data_hash = candidate.commitments.head_data.hash(); + + if parent_head_data_hash == output_head_data_hash { + return Err(CandidateEntryError::ZeroLengthCycle) + } + Ok(Self { candidate_hash, - parent_head_data_hash: persisted_validation_data.parent_head.hash(), - output_head_data_hash: candidate.commitments.head_data.hash(), + parent_head_data_hash, + output_head_data_hash, relay_parent: 
candidate.descriptor.relay_parent, state, candidate: Arc::new(ProspectiveCandidate { @@ -753,6 +754,11 @@ impl FragmentChain { candidate: &impl HypotheticalOrConcreteCandidate, ) -> Result<(), Error> { let candidate_hash = candidate.candidate_hash(); + + if self.scope.get_pending_availability(&candidate_hash).is_some() { + return Err(Error::CandidateAlreadyPendingAvailability(candidate_hash)) + } + if self.candidates.contains(&candidate_hash) || self.unconnected.contains(&candidate_hash) { return Err(Error::CandidateAlreadyKnown(candidate_hash)) } @@ -813,27 +819,14 @@ impl FragmentChain { } } - fn check_cycles_or_invalid_tree( - &self, - parent_head_hash: &Hash, - output_head_hash: &Hash, - ) -> Result<(), Error> { - self.unconnected - .check_cycles_or_invalid_tree(parent_head_hash, output_head_hash)?; - - // trivial 0-length cycle. - if parent_head_hash == output_head_hash { - return Err(Error::ZeroLengthCycle) - } - + fn check_cycles_or_invalid_tree(&self, output_head_hash: &Hash) -> Result<(), Error> { // this should catch a cycle where this candidate would point back to the parent of some // candidate in the chain. if self.by_parent_head.contains_key(output_head_hash) { return Err(Error::Cycle) } - // multiple paths to the same state, which would break the tree - // assumption. + // // multiple paths to the same state, which can't happen for a chain. if self.by_output_head.contains_key(output_head_hash) { return Err(Error::MultiplePaths) } @@ -853,6 +846,13 @@ impl FragmentChain { let relay_parent = candidate.relay_parent(); let parent_head_hash = candidate.parent_head_data_hash(); + // trivial 0-length cycle. 
+ if let Some(output_head_hash) = candidate.output_head_data_hash() { + if parent_head_hash == output_head_hash { + return Err(Error::ZeroLengthCycle) + } + } + let Some(relay_parent) = self.scope.ancestor(&relay_parent) else { return Err(Error::RelayParentNotInScope) }; @@ -876,11 +876,6 @@ impl FragmentChain { } } - // Check for cycles or invalid tree transitions. - if let Some(ref output_head_hash) = candidate.output_head_data_hash() { - self.check_cycles_or_invalid_tree(&parent_head_hash, output_head_hash)?; - } - let constraints = if let Some(parent_candidate) = self.by_output_head.get(&parent_head_hash) { let Some(parent_candidate) = @@ -901,6 +896,11 @@ impl FragmentChain { return Ok(()) }; + // Check for cycles or invalid tree transitions. + if let Some(ref output_head_hash) = candidate.output_head_data_hash() { + self.check_cycles_or_invalid_tree(output_head_hash)?; + } + // We do additional checks for complete candidates. if let (Some(commitments), Some(pvd), Some(validation_code_hash)) = ( candidate.commitments(), @@ -937,10 +937,9 @@ impl FragmentChain { for child_hash in children.iter() { let Some(child) = storage.by_candidate_hash.get(child_hash) else { continue }; - // Detected a cycle. Stop now to avoid looping forever. - // Remove the candidate that creates the cycle. + // Already visited this parent. Either is a cycle or multiple paths that lead to the + // same candidate. Either way, stop this branch to avoid looping forever. if visited.contains(&child.output_head_data_hash) { - to_remove.push(*child_hash); continue } @@ -1012,12 +1011,7 @@ impl FragmentChain { return None }; - if self - .check_cycles_or_invalid_tree( - &candidate.parent_head_data_hash, - &candidate.output_head_data_hash, - ) - .is_err() + if self.check_cycles_or_invalid_tree(&candidate.output_head_data_hash).is_err() { return None } @@ -1084,6 +1078,7 @@ impl FragmentChain { )) }); + // TODO: abstract the fork selection rule into a function. 
let best_candidate = possible_children.min_by(|child1, child2| { // Always pick a candidate pending availability as best. if self.scope.get_pending_availability(&child1.1).is_some() { diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index 95b7b444e3ff..a67c31cc8ff7 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -28,6 +28,7 @@ use std::collections::{HashMap, HashSet}; +use fragment_chain::CandidateStorage; use futures::{channel::oneshot, prelude::*}; use polkadot_node_subsystem::{ @@ -192,6 +193,8 @@ async fn handle_active_leaves_update( }; let scheduled_paras = fetch_upcoming_paras(&mut *ctx, hash).await?; + // TODO: here we actually need to take the full implicit view into account, for this to work + // with core sharing paras. let block_info: RelayChainBlockInfo = match fetch_block_info(&mut *ctx, &mut temp_header_cache, hash).await? { @@ -230,7 +233,7 @@ async fn handle_active_leaves_update( // Find constraints. 
let mut fragment_chains = HashMap::new(); for para in scheduled_paras { - let mut prev_candidate_storage = prev_fragment_chains + let prev_candidate_storage = prev_fragment_chains .map(|chains| { chains .fragment_chains @@ -263,16 +266,20 @@ async fn handle_active_leaves_update( .await?; let mut compact_pending = Vec::with_capacity(pending_availability.len()); + let mut new_storage = CandidateStorage::default(); + for c in pending_availability { let candidate_hash = c.compact.candidate_hash; - let res = prev_candidate_storage.add_pending_availability_candidate( + let res = new_storage.add_pending_availability_candidate( candidate_hash, c.candidate, c.persisted_validation_data, ); match res { - Ok(_) | Err(FragmentChainError::CandidateAlreadyKnown(_)) => {}, + Ok(_) | + Err(FragmentChainError::CandidateAlreadyKnown(_)) | + Err(FragmentChainError::CandidateAlreadyPendingAvailability(_)) => {}, Err(err) => { gum::warn!( target: LOG_TARGET, @@ -320,7 +327,16 @@ async fn handle_active_leaves_update( "Creating fragment chain" ); - let chain = FragmentChain::populate(scope, &mut prev_candidate_storage); + // Add old candidates to the new storage only after we added the pending availability + // candidates. The pending candidates have higher priority and can conflict with the old + // candidates. + for candidate in prev_candidate_storage.into_candidates() { + // We need to swallow any potential errors here, as they can happen under normal + // operation, with candidates becoming out of scope for example. 
+ let _ = new_storage.add_candidate_entry(candidate); + } + + let chain = FragmentChain::populate(scope, &mut new_storage); gum::trace!( target: LOG_TARGET, @@ -469,6 +485,16 @@ async fn handle_introduce_seconded_candidate( ); added = true; }, + Err(FragmentChainError::CandidateAlreadyPendingAvailability(_)) => { + gum::debug!( + target: LOG_TARGET, + para = ?para, + relay_parent = ?leaf, + "Attempting to introduce a candidate which is already pending availability: {:?}", + candidate_hash + ); + added = true; + }, Err(err) => { gum::debug!( target: LOG_TARGET, @@ -523,6 +549,7 @@ async fn handle_candidate_backed( para_id = ?para, ?candidate_hash, "Received redundant instruction to mark as backed an already backed candidate", + // TODO: this can happen even if the candidate is not in chain. ); found_candidate = true; } @@ -535,7 +562,7 @@ async fn handle_candidate_backed( target: LOG_TARGET, relay_parent = ?leaf, para_id = ?para, - "Candidate chain for para: {:?}", + "Candidate backed. Candidate chain for para: {:?}", new_chain.to_vec() ); @@ -548,7 +575,10 @@ async fn handle_candidate_backed( ); leaf_data.fragment_chains.insert(para, new_chain); + continue } + + leaf_data.fragment_chains.insert(para, chain); } } @@ -564,7 +594,9 @@ async fn handle_candidate_backed( } if !found_candidate { - gum::warn!( + // This can be harmless. It can happen if we received a better backed candidate before and + // dropped this other candidate already. 
+ gum::debug!( target: LOG_TARGET, para_id = ?para, ?candidate_hash, diff --git a/polkadot/node/core/provisioner/src/lib.rs b/polkadot/node/core/provisioner/src/lib.rs index fa16b38d28bd..7007c9412c35 100644 --- a/polkadot/node/core/provisioner/src/lib.rs +++ b/polkadot/node/core/provisioner/src/lib.rs @@ -794,9 +794,11 @@ async fn select_candidates( relay_parent: Hash, sender: &mut impl overseer::ProvisionerSenderTrait, ) -> Result, Error> { - gum::trace!(target: LOG_TARGET, + gum::trace!( + target: LOG_TARGET, leaf_hash=?relay_parent, - "before GetBackedCandidates"); + "before GetBackedCandidates" + ); let selected_candidates = match prospective_parachains_mode { ProspectiveParachainsMode::Enabled { .. } => diff --git a/polkadot/node/network/statement-distribution/src/v2/mod.rs b/polkadot/node/network/statement-distribution/src/v2/mod.rs index 2bb9c82c6a6f..e0d75b574d50 100644 --- a/polkadot/node/network/statement-distribution/src/v2/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/mod.rs @@ -2202,7 +2202,9 @@ async fn fragment_chain_update_inner( // 2. 
find out which are in the frontier gum::debug!( target: LOG_TARGET, - "Calling getHypotheticalMembership from statement distribution" + active_leaf_hash = ?active_leaf_hash, + "Calling getHypotheticalMembership from statement distribution: {:?}", + &hypotheticals ); let candidate_memberships = { let (tx, rx) = oneshot::channel(); From 7a59fcfc630efe082369beb5552217dc96e68977 Mon Sep 17 00:00:00 2001 From: alindima Date: Wed, 3 Jul 2024 17:54:27 +0300 Subject: [PATCH 06/56] add doc comments to HypotheticalOrConcreteCandidate --- .../node/subsystem-util/src/inclusion_emulator/mod.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs b/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs index 666f2df11aa8..cb16369a9efe 100644 --- a/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs +++ b/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs @@ -803,13 +803,22 @@ fn validate_against_constraints( .map_err(FragmentValidityError::OutputsInvalid) } +/// Trait for a hypothetical or concrete candidate, as needed when assessing the validity of a +/// potential candidate. pub trait HypotheticalOrConcreteCandidate { + /// Return a reference to the candidate commitments, if present. fn commitments(&self) -> Option<&CandidateCommitments>; + /// Return a reference to the persisted validation data, if present. fn persisted_validation_data(&self) -> Option<&PersistedValidationData>; + /// Return a reference to the validation code hash, if present. fn validation_code_hash(&self) -> Option<&ValidationCodeHash>; + /// Return the parent head hash. fn parent_head_data_hash(&self) -> Hash; + /// Return the output head hash, if present. fn output_head_data_hash(&self) -> Option; + /// Return the relay parent hash. fn relay_parent(&self) -> Hash; + /// Return the candidate hash. 
fn candidate_hash(&self) -> CandidateHash; } From 33f239e82eca669170437654242e25b5631ad58e Mon Sep 17 00:00:00 2001 From: alindima Date: Thu, 4 Jul 2024 09:18:31 +0300 Subject: [PATCH 07/56] fix compilation --- polkadot/node/core/prospective-parachains/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index a67c31cc8ff7..df09331d9888 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -165,7 +165,7 @@ async fn handle_active_leaves_update( ctx: &mut Context, view: &mut View, update: ActiveLeavesUpdate, - metrics: &Metrics, + _metrics: &Metrics, ) -> JfyiErrorResult<()> { // 1. clean up inactive leaves // 2. determine all scheduled paras at the new block From ac9285ef8e250921ff8e9b832b6887826df8e286 Mon Sep 17 00:00:00 2001 From: alindima Date: Thu, 4 Jul 2024 11:08:05 +0300 Subject: [PATCH 08/56] some minor refactors --- .../src/fragment_chain/mod.rs | 52 +++++++++++++------ .../core/prospective-parachains/src/lib.rs | 48 ++++++----------- polkadot/node/core/provisioner/src/lib.rs | 2 +- .../statement-distribution/src/v2/mod.rs | 4 ++ 4 files changed, 57 insertions(+), 49 deletions(-) diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs index 1a72bd6c522c..6a3b216c71f0 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs @@ -107,6 +107,7 @@ use polkadot_primitives::{ }; use thiserror::Error; +// TODO: fix this. const EXTRA_UNCONNECTED_COUNT: usize = 10; /// Fragment chain related errors. @@ -146,6 +147,12 @@ pub(crate) enum Error { RelayParentNotInScope, } +/// The rule for selecting between two backed candidate forks, when adding to the chain. 
+/// All validators should adhere to this rule. +fn fork_selection_rule(hash1: &CandidateHash, hash2: &CandidateHash) -> Ordering { + hash1.cmp(hash2) +} + /// Stores candidates and information about them such as their relay-parents and their backing /// states. #[derive(Clone, Default)] @@ -612,6 +619,15 @@ impl FragmentChain { self.unconnected.candidates() } + /// Return whether this candidate is backed in this chain or the unconnected storage. + pub fn is_candidate_backed(&self, hash: &CandidateHash) -> bool { + self.candidates.contains(hash) || + matches!( + self.unconnected.by_candidate_hash.get(hash), + Some(candidate) if candidate.state == CandidateState::Backed + ) + } + pub fn as_candidate_storage(&self) -> CandidateStorage { let mut storage = self.unconnected.clone(); @@ -871,7 +887,7 @@ impl FragmentChain { // If the candidate is backed and in the current chain, accept only a candidate with // a lower hash. - if other_candidate < &candidate.candidate_hash() { + if fork_selection_rule(other_candidate, &candidate.candidate_hash()) == Ordering::Less { return Err(Error::ForkChoiceRule) } } @@ -1078,17 +1094,18 @@ impl FragmentChain { )) }); - // TODO: abstract the fork selection rule into a function. - let best_candidate = possible_children.min_by(|child1, child2| { - // Always pick a candidate pending availability as best. - if self.scope.get_pending_availability(&child1.1).is_some() { - Ordering::Less - } else if self.scope.get_pending_availability(&child2.1).is_some() { - Ordering::Greater - } else { - child1.1.cmp(&child2.1) - } - }); + let best_candidate = + possible_children.min_by(|(_, ref child1, _, _), (_, ref child2, _, _)| { + // Always pick a candidate pending availability as best. + if self.scope.get_pending_availability(child1).is_some() { + Ordering::Less + } else if self.scope.get_pending_availability(child2).is_some() { + Ordering::Greater + } else { + // Otherwise, use the fork selection rule. 
+ fork_selection_rule(child1, child2) + } + }); if let Some((fragment, candidate_hash, output_head_data_hash, parent_head_data_hash)) = best_candidate @@ -1119,12 +1136,13 @@ impl FragmentChain { } } - pub fn candidate_backed(mut self, newly_backed_candidate: &CandidateHash) -> Self { - if !self.unconnected.mark_backed(newly_backed_candidate) { - return self + pub fn candidate_backed(&self, newly_backed_candidate: &CandidateHash) -> Option { + let mut old_storage = self.as_candidate_storage(); + + if !old_storage.mark_backed(newly_backed_candidate) { + return None } - let mut old_storage = self.as_candidate_storage(); - Self::populate(self.scope, &mut old_storage) + Some(Self::populate(self.scope.clone(), &mut old_storage)) } } diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index df09331d9888..eefcfdccd77b 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -139,9 +139,9 @@ async fn run_iteration( FromOrchestra::Signal(OverseerSignal::BlockFinalized(..)) => {}, FromOrchestra::Communication { msg } => match msg { ProspectiveParachainsMessage::IntroduceSecondedCandidate(request, tx) => - handle_introduce_seconded_candidate(&mut *ctx, view, request, tx, metrics).await, + handle_introduce_seconded_candidate(view, request, tx, metrics).await, ProspectiveParachainsMessage::CandidateBacked(para, candidate_hash) => - handle_candidate_backed(&mut *ctx, view, para, candidate_hash).await, + handle_candidate_backed(view, para, candidate_hash).await, ProspectiveParachainsMessage::GetBackableCandidates( relay_parent, para, @@ -192,12 +192,10 @@ async fn handle_active_leaves_update( return Ok(()) }; - let scheduled_paras = fetch_upcoming_paras(&mut *ctx, hash).await?; - // TODO: here we actually need to take the full implicit view into account, for this to work - // with core sharing paras. 
+ let scheduled_paras = fetch_upcoming_paras(ctx, hash).await?; let block_info: RelayChainBlockInfo = - match fetch_block_info(&mut *ctx, &mut temp_header_cache, hash).await? { + match fetch_block_info(ctx, &mut temp_header_cache, hash).await? { None => { gum::warn!( target: LOG_TARGET, @@ -220,7 +218,7 @@ async fn handle_active_leaves_update( allowed_ancestry_len }; let mut ancestry = - fetch_ancestry(&mut *ctx, &mut temp_header_cache, hash, requested_ancestry_len).await?; + fetch_ancestry(ctx, &mut temp_header_cache, hash, requested_ancestry_len).await?; let prev_fragment_chains = ancestry.first().and_then(|prev_leaf| view.active_leaves.get(&prev_leaf.hash)); @@ -243,7 +241,7 @@ async fn handle_active_leaves_update( }) .unwrap_or_default(); - let backing_state = fetch_backing_state(&mut *ctx, hash, para).await?; + let backing_state = fetch_backing_state(ctx, hash, para).await?; let Some((constraints, pending_availability)) = backing_state else { // This indicates a runtime conflict of some kind. @@ -426,9 +424,7 @@ async fn preprocess_candidates_pending_availability( Ok(importable) } -#[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] -async fn handle_introduce_seconded_candidate( - _ctx: &mut Context, +async fn handle_introduce_seconded_candidate( view: &mut View, request: IntroduceSecondedCandidateRequest, tx: oneshot::Sender, @@ -530,40 +526,31 @@ async fn handle_introduce_seconded_candidate( let _ = tx.send(added); } -#[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] -async fn handle_candidate_backed( - _ctx: &mut Context, - view: &mut View, - para: ParaId, - candidate_hash: CandidateHash, -) { +async fn handle_candidate_backed(view: &mut View, para: ParaId, candidate_hash: CandidateHash) { // Repopulate the fragment chains. 
let mut found_candidate = false; let mut found_para = false; for (leaf, leaf_data) in view.active_leaves.iter_mut() { - if let Some(chain) = leaf_data.fragment_chains.remove(¶) { + if let Some(chain) = leaf_data.fragment_chains.get_mut(¶) { found_para = true; - if chain.contains_candidate(&candidate_hash) { + if chain.is_candidate_backed(&candidate_hash) { gum::debug!( target: LOG_TARGET, para_id = ?para, ?candidate_hash, "Received redundant instruction to mark as backed an already backed candidate", - // TODO: this can happen even if the candidate is not in chain. ); found_candidate = true; - } - - if chain.contains_unconnected_candidate(&candidate_hash) { + } else if chain.contains_unconnected_candidate(&candidate_hash) { found_candidate = true; - let new_chain = chain.candidate_backed(&candidate_hash); + let maybe_new_chain = chain.candidate_backed(&candidate_hash); gum::trace!( target: LOG_TARGET, relay_parent = ?leaf, para_id = ?para, "Candidate backed. Candidate chain for para: {:?}", - new_chain.to_vec() + maybe_new_chain.as_ref().unwrap_or(chain).to_vec() ); gum::trace!( @@ -571,14 +558,13 @@ async fn handle_candidate_backed( relay_parent = ?leaf, para_id = ?para, "Potential candidate storage for para: {:?}", - new_chain.unconnected().map(|candidate| candidate.hash()).collect::>() + maybe_new_chain.as_ref().unwrap_or(chain).unconnected().map(|candidate| candidate.hash()).collect::>() ); - leaf_data.fragment_chains.insert(para, new_chain); - continue + if let Some(new_chain) = maybe_new_chain { + *chain = new_chain; + } } - - leaf_data.fragment_chains.insert(para, chain); } } diff --git a/polkadot/node/core/provisioner/src/lib.rs b/polkadot/node/core/provisioner/src/lib.rs index 7007c9412c35..e658271dfbdb 100644 --- a/polkadot/node/core/provisioner/src/lib.rs +++ b/polkadot/node/core/provisioner/src/lib.rs @@ -273,7 +273,7 @@ async fn handle_communication( let span = state.span.child("provisionable-data"); let _timer = metrics.time_provisionable_data(); - 
gum::trace!(target: LOG_TARGET, ?relay_parent, "Received provisionable data."); + gum::trace!(target: LOG_TARGET, ?relay_parent, "Received provisionable data: {:?}", &data); note_provisionable_data(state, &span, data); } diff --git a/polkadot/node/network/statement-distribution/src/v2/mod.rs b/polkadot/node/network/statement-distribution/src/v2/mod.rs index e0d75b574d50..77cd59d69fbd 100644 --- a/polkadot/node/network/statement-distribution/src/v2/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/mod.rs @@ -2199,6 +2199,10 @@ async fn fragment_chain_update_inner( Some(h) => h, }; + if hypotheticals.is_empty() { + return + } + // 2. find out which are in the frontier gum::debug!( target: LOG_TARGET, From 2b58595a94acb78a9d56f3f62a9efb727c94e3b1 Mon Sep 17 00:00:00 2001 From: alindima Date: Thu, 4 Jul 2024 17:04:06 +0300 Subject: [PATCH 09/56] mostly cosmetics --- .../core/prospective-parachains/src/error.rs | 15 --- .../src/fragment_chain/mod.rs | 105 +++++++++--------- .../core/prospective-parachains/src/lib.rs | 44 ++++---- 3 files changed, 79 insertions(+), 85 deletions(-) diff --git a/polkadot/node/core/prospective-parachains/src/error.rs b/polkadot/node/core/prospective-parachains/src/error.rs index 2b0933ab1c7e..4b332b9c5de5 100644 --- a/polkadot/node/core/prospective-parachains/src/error.rs +++ b/polkadot/node/core/prospective-parachains/src/error.rs @@ -30,18 +30,6 @@ use fatality::Nested; #[allow(missing_docs)] #[fatality::fatality(splitable)] pub enum Error { - #[fatal] - #[error("SubsystemError::Context error: {0}")] - SubsystemContext(String), - - #[fatal] - #[error("Spawning a task failed: {0}")] - SpawnFailed(SubsystemError), - - #[fatal] - #[error("Participation worker receiver exhausted.")] - ParticipationWorkerReceiverExhausted, - #[fatal] #[error("Receiving message from overseer failed: {0}")] SubsystemReceive(#[source] SubsystemError), @@ -55,9 +43,6 @@ pub enum Error { #[error(transparent)] ChainApi(#[from] ChainApiError), - 
#[error(transparent)] - Subsystem(SubsystemError), - #[error("Request to chain API subsystem dropped")] ChainApiRequestCanceled(oneshot::Canceled), diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs index 6a3b216c71f0..14bc2859a345 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs @@ -107,44 +107,41 @@ use polkadot_primitives::{ }; use thiserror::Error; -// TODO: fix this. -const EXTRA_UNCONNECTED_COUNT: usize = 10; - /// Fragment chain related errors. #[derive(Debug, Clone, PartialEq, Error)] pub(crate) enum Error { - #[error("Candidate already known: {0}")] - CandidateAlreadyKnown(CandidateHash), - #[error("Candidate already pending availability: {0}")] - CandidateAlreadyPendingAvailability(CandidateHash), - #[error("Candidate would introduce a zero-length cycle")] + #[error("Candidate already known")] + CandidateAlreadyKnown, + #[error("Candidate is already pending availability")] + CandidateAlreadyPendingAvailability, + #[error("Candidate's parent head is equal to its output head. Would introduce a cycle.")] ZeroLengthCycle, #[error("Candidate would introduce a cycle")] Cycle, - #[error("Candidate would introduce two paths to the same state")] + #[error("Candidate would introduce two paths to the same output state")] MultiplePaths, - #[error("Attempting to directly introduce a Backed candidate. 
It should first be introduced as Seconded: {0}")] - IntroduceBackedCandidate(CandidateHash), - #[error("Too many candidates")] - TooManyCandidates, - #[error("RelayParentPrecedesCandidatePendingAvailability")] - RelayParentPrecedesCandidatePendingAvailability, - #[error("ForkWithCandidatePendingAvailability")] - ForkWithCandidatePendingAvailability, - #[error("ForkChoiceRule")] - ForkChoiceRule, - #[error("ParentCandidateNotFound")] + #[error("Attempting to directly introduce a Backed candidate. It should first be introduced as Seconded")] + IntroduceBackedCandidate, + #[error("Current backed candidate chain reached the `max_candidate_depth + 1` limit")] + ChainTooLong, + #[error("Relay parent {0:?} of the candidate precedes the relay parent {0:?} of a pending availability candidate")] + RelayParentPrecedesCandidatePendingAvailability(Hash, Hash), + #[error("Candidate would introduce a fork with a pending availability candidate: {0:?}")] + ForkWithCandidatePendingAvailability(CandidateHash), + #[error("Fork selection rule favours another candidate: {0:?}")] + ForkChoiceRule(CandidateHash), + #[error("Could not find parent of the candidate")] ParentCandidateNotFound, - #[error("ComputeConstraints: {0:?}")] + #[error("Could not compute candidate constraints: {0:?}")] ComputeConstraints(inclusion_emulator::ModificationError), - #[error("CheckAgainstConstraints: {0:?}")] + #[error("Candidate violates constraints: {0:?}")] CheckAgainstConstraints(inclusion_emulator::FragmentValidityError), - #[error("RelayParentMovedBackwards")] + #[error("Relay parent would move backwards from the latest candidate in the chain")] RelayParentMovedBackwards, - #[error("CandidateEntry: {0}")] + #[error(transparent)] CandidateEntry(#[from] CandidateEntryError), - #[error("RelayParentNotInScope")] - RelayParentNotInScope, + #[error("Relay parent {0:?} not in scope. 
Earliest relay parent allowed {0:?}")] + RelayParentNotInScope(Hash, Hash), } /// The rule for selecting between two backed candidate forks, when adding to the chain. @@ -193,7 +190,7 @@ impl CandidateStorage { pub fn add_candidate_entry(&mut self, candidate: CandidateEntry) -> Result<(), Error> { let candidate_hash = candidate.candidate_hash; if self.by_candidate_hash.contains_key(&candidate_hash) { - return Err(Error::CandidateAlreadyKnown(candidate_hash)) + return Err(Error::CandidateAlreadyKnown) } self.by_parent_head @@ -296,6 +293,7 @@ impl CandidateStorage { }) } + #[cfg(test)] fn len(&self) -> usize { self.by_candidate_hash.len() } @@ -316,7 +314,7 @@ enum CandidateState { pub enum CandidateEntryError { #[error("Candidate does not match the persisted validation data provided alongside it")] PersistedValidationDataMismatch, - #[error("Candidate is a zero-length cycle")] + #[error("Candidate's parent head is equal to its output head. Would introduce a cycle")] ZeroLengthCycle, } @@ -772,17 +770,11 @@ impl FragmentChain { let candidate_hash = candidate.candidate_hash(); if self.scope.get_pending_availability(&candidate_hash).is_some() { - return Err(Error::CandidateAlreadyPendingAvailability(candidate_hash)) + return Err(Error::CandidateAlreadyPendingAvailability) } if self.candidates.contains(&candidate_hash) || self.unconnected.contains(&candidate_hash) { - return Err(Error::CandidateAlreadyKnown(candidate_hash)) - } - - if (self.chain.len() + self.unconnected.len()) > - (self.scope.max_depth + EXTRA_UNCONNECTED_COUNT) - { - return Err(Error::TooManyCandidates) + return Err(Error::CandidateAlreadyKnown) } self.check_potential(candidate) @@ -793,7 +785,7 @@ impl FragmentChain { candidate: &CandidateEntry, ) -> Result<(), Error> { if candidate.state == CandidateState::Backed { - return Err(Error::IntroduceBackedCandidate(candidate.candidate_hash)); + return Err(Error::IntroduceBackedCandidate); } let res = self.can_add_candidate_as_potential(&candidate); @@ 
-821,7 +813,6 @@ impl FragmentChain { let res = self.can_add_candidate_as_potential(&candidate); match res { - Err(Error::TooManyCandidates) => break, Ok(()) => { // This clone is cheap because the expensive stuff is wrapped in an Arc let Ok(()) = self.unconnected.add_candidate_entry(candidate.clone()) else { @@ -842,7 +833,7 @@ impl FragmentChain { return Err(Error::Cycle) } - // // multiple paths to the same state, which can't happen for a chain. + // multiple paths to the same state, which can't happen for a chain. if self.by_output_head.contains_key(output_head_hash) { return Err(Error::MultiplePaths) } @@ -870,41 +861,54 @@ impl FragmentChain { } let Some(relay_parent) = self.scope.ancestor(&relay_parent) else { - return Err(Error::RelayParentNotInScope) + return Err(Error::RelayParentNotInScope( + relay_parent, + self.scope.earliest_relay_parent().hash, + )) }; - let earliest_rp = self.earliest_relay_parent_pending_availability(); - if relay_parent.number < earliest_rp.number { - return Err(Error::RelayParentPrecedesCandidatePendingAvailability) // relay parent moved - // backwards. + let earliest_rp_of_pending_availability = self.earliest_relay_parent_pending_availability(); + // relay parent moved backwards. + if relay_parent.number < earliest_rp_of_pending_availability.number { + return Err(Error::RelayParentPrecedesCandidatePendingAvailability( + relay_parent.hash, + earliest_rp_of_pending_availability.hash, + )) } // Check if it's a fork with a backed candidate. if let Some(other_candidate) = self.by_parent_head.get(&parent_head_hash) { if self.scope().get_pending_availability(other_candidate).is_some() { // Cannot accept a fork with a candidate pending availability. - return Err(Error::ForkWithCandidatePendingAvailability) + return Err(Error::ForkWithCandidatePendingAvailability(*other_candidate)) } // If the candidate is backed and in the current chain, accept only a candidate with // a lower hash. 
if fork_selection_rule(other_candidate, &candidate.candidate_hash()) == Ordering::Less { - return Err(Error::ForkChoiceRule) + return Err(Error::ForkChoiceRule(*other_candidate)) } } let constraints = if let Some(parent_candidate) = self.by_output_head.get(&parent_head_hash) { - let Some(parent_candidate) = - self.chain.iter().find(|c| &c.candidate_hash == parent_candidate) + let Some(parent_candidate_index) = + self.chain.iter().position(|c| &c.candidate_hash == parent_candidate) else { + // Should never really happen. return Err(Error::ParentCandidateNotFound) }; + + if parent_candidate_index > self.scope.max_depth { + return Err(Error::ChainTooLong) + } + + let parent_candidate = &self.chain[parent_candidate_index]; self.scope .base_constraints .apply_modifications(&parent_candidate.cumulative_modifications) .map_err(Error::ComputeConstraints)? - // Check if it builds on the latest included candidate } else if self.scope.base_constraints.required_parent.hash() == parent_head_hash { + // It builds on the latest included candidate. self.scope.base_constraints.clone() } else { // If the parent is not yet part of the chain, there's nothing else we can check for @@ -931,8 +935,9 @@ impl FragmentChain { pvd, ) .map_err(Error::CheckAgainstConstraints)?; - // Otherwise, at least check the relay parent progresses. + // TODO: make sure this checks the relay parent to not move backwards. } else if relay_parent.number < constraints.min_relay_parent_number { + // Otherwise, at least check the relay parent progresses. 
return Err(Error::RelayParentMovedBackwards) } diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index eefcfdccd77b..61b7757e979c 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -70,7 +70,7 @@ use self::metrics::Metrics; const LOG_TARGET: &str = "parachain::prospective-parachains"; struct RelayBlockViewData { - // Scheduling info for paras and upcoming paras. + // The fragment chains for current and upcoming scheduled paras. fragment_chains: HashMap, } @@ -167,10 +167,14 @@ async fn handle_active_leaves_update( update: ActiveLeavesUpdate, _metrics: &Metrics, ) -> JfyiErrorResult<()> { - // 1. clean up inactive leaves - // 2. determine all scheduled paras at the new block - // 3. construct new fragment chain for each para for each new leaf - // 4. prune candidate storage. + // For each active leaf: + // - determine the scheduled paras + // - pre-populate the candidate storage with pending availability candidates and candidates from + // the parent leaf. + // - populate the fragment chain + // + // Only then, clean up inactive leaves. They must be cleaned only after new leaves are + // processed, because we may reuse their candidates. let mut temp_header_cache = HashMap::new(); for activated in update.activated.into_iter() { @@ -228,9 +232,9 @@ async fn handle_active_leaves_update( ancestry.clear(); } - // Find constraints. let mut fragment_chains = HashMap::new(); for para in scheduled_paras { + // Get the candidate storage of the parent leaf, if present. let prev_candidate_storage = prev_fragment_chains .map(|chains| { chains @@ -241,8 +245,8 @@ async fn handle_active_leaves_update( }) .unwrap_or_default(); + // Find constraints and pending availability candidates. 
let backing_state = fetch_backing_state(ctx, hash, para).await?; - let Some((constraints, pending_availability)) = backing_state else { // This indicates a runtime conflict of some kind. gum::debug!( @@ -276,8 +280,8 @@ async fn handle_active_leaves_update( match res { Ok(_) | - Err(FragmentChainError::CandidateAlreadyKnown(_)) | - Err(FragmentChainError::CandidateAlreadyPendingAvailability(_)) => {}, + Err(FragmentChainError::CandidateAlreadyKnown) | + Err(FragmentChainError::CandidateAlreadyPendingAvailability) => {}, Err(err) => { gum::warn!( target: LOG_TARGET, @@ -334,6 +338,7 @@ async fn handle_active_leaves_update( let _ = new_storage.add_candidate_entry(candidate); } + // Finally, populate the fragment chain. let chain = FragmentChain::populate(scope, &mut new_storage); gum::trace!( @@ -471,7 +476,7 @@ async fn handle_introduce_seconded_candidate( ); added = true; }, - Err(FragmentChainError::CandidateAlreadyKnown(_)) => { + Err(FragmentChainError::CandidateAlreadyKnown) => { gum::debug!( target: LOG_TARGET, para = ?para, @@ -481,7 +486,7 @@ async fn handle_introduce_seconded_candidate( ); added = true; }, - Err(FragmentChainError::CandidateAlreadyPendingAvailability(_)) => { + Err(FragmentChainError::CandidateAlreadyPendingAvailability) => { gum::debug!( target: LOG_TARGET, para = ?para, @@ -527,7 +532,6 @@ async fn handle_introduce_seconded_candidate( } async fn handle_candidate_backed(view: &mut View, para: ParaId, candidate_hash: CandidateHash) { - // Repopulate the fragment chains. let mut found_candidate = false; let mut found_para = false; for (leaf, leaf_data) in view.active_leaves.iter_mut() { @@ -543,6 +547,7 @@ async fn handle_candidate_backed(view: &mut View, para: ParaId, candidate_hash: found_candidate = true; } else if chain.contains_unconnected_candidate(&candidate_hash) { found_candidate = true; + // Now that a candidate was backed, attempt to recreate the fragment chain. 
let maybe_new_chain = chain.candidate_backed(&candidate_hash); gum::trace!( @@ -561,6 +566,7 @@ async fn handle_candidate_backed(view: &mut View, para: ParaId, candidate_hash: maybe_new_chain.as_ref().unwrap_or(chain).unconnected().map(|candidate| candidate.hash()).collect::>() ); + // Replace the old chain with the new one. if let Some(new_chain) = maybe_new_chain { *chain = new_chain; } @@ -687,16 +693,19 @@ fn answer_hypothetical_membership_request( let res = fragment_chain.can_add_candidate_as_potential(candidate); match res { - Err(FragmentChainError::CandidateAlreadyKnown(_)) | Ok(()) => { + Err(FragmentChainError::CandidateAlreadyKnown) | Ok(()) => { membership.push(*active_leaf); }, + // This will also match if the candidate is already pending availability. + // In this case, we don't need to validate it again or distribute its statements. + // It's already on chain. Err(err) => { gum::debug!( target: LOG_TARGET, para = ?para_id, leaf = ?active_leaf, candidate = ?candidate.candidate_hash(), - "Candidate is not a hypothetical member: {:?}", + "Candidate is not a hypothetical member: {}", err ) }, @@ -727,12 +736,7 @@ fn answer_prospective_validation_data_request( request: ProspectiveValidationDataRequest, tx: oneshot::Sender>, ) { - // 1. Try to get the head-data from the candidate store if known. - // 2. Otherwise, it might exist as the base in some relay-parent and we can find it by iterating - // fragment chains. - // 3. Otherwise, it is unknown. - // 4. Also try to find the relay parent block info by scanning fragment chains. - // 5. If head data and relay parent block info are found - success. Otherwise, failure. + // Try getting the needed data from any fragment chain. 
let (mut head_data, parent_head_data_hash) = match request.parent_head_data { ParentHeadData::OnlyHash(parent_head_data_hash) => (None, parent_head_data_hash), From e34de0e0fa82ec52ce2019453b380eb3c43c693f Mon Sep 17 00:00:00 2001 From: alindima Date: Fri, 5 Jul 2024 15:57:28 +0300 Subject: [PATCH 10/56] start updating comments --- .../src/fragment_chain/mod.rs | 206 +++++++++++------- .../core/prospective-parachains/src/lib.rs | 2 +- 2 files changed, 130 insertions(+), 78 deletions(-) diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs index 14bc2859a345..5cad5722119d 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs @@ -150,18 +150,17 @@ fn fork_selection_rule(hash1: &CandidateHash, hash2: &CandidateHash) -> Ordering hash1.cmp(hash2) } -/// Stores candidates and information about them such as their relay-parents and their backing -/// states. +/// Utility for storing candidates and information about them such as their relay-parents and their +/// backing states. This does not assume any restriction on whether or not the candidates form a +/// chain. Useful for storing all kinds of candidates. #[derive(Clone, Default)] pub(crate) struct CandidateStorage { - // Index from head data hash to candidate hashes with that head data as a parent. Purely for + // Index from head data hash to candidate hashes with that head data as a parent. Useful for // efficiency when responding to `ProspectiveValidationDataRequest`s or when trying to find a // new candidate to push to a chain. - // Even though having multiple candidates with same parent would be invalid for a parachain, it - // could happen across different relay chain forks, hence the HashSet. by_parent_head: HashMap>, - // Index from head data hash to candidate hashes outputting that head data. 
Purely for + // Index from head data hash to candidate hashes outputting that head data. For // efficiency when responding to `ProspectiveValidationDataRequest`s. by_output_head: HashMap>, @@ -170,7 +169,7 @@ pub(crate) struct CandidateStorage { } impl CandidateStorage { - /// Introduce a new candidate. + /// Introduce a new pending availability candidate. pub fn add_pending_availability_candidate( &mut self, candidate_hash: CandidateHash, @@ -187,6 +186,7 @@ impl CandidateStorage { self.add_candidate_entry(entry) } + /// Introduce a new candidate entry. pub fn add_candidate_entry(&mut self, candidate: CandidateEntry) -> Result<(), Error> { let candidate_hash = candidate.candidate_hash; if self.by_candidate_hash.contains_key(&candidate_hash) { @@ -225,7 +225,8 @@ impl CandidateStorage { } } - /// Note that an existing candidate has been backed. + /// Note that an existing candidate has been backed. Return false if the candidate was not + /// found. fn mark_backed(&mut self, candidate_hash: &CandidateHash) -> bool { if let Some(entry) = self.by_candidate_hash.get_mut(candidate_hash) { gum::trace!(target: LOG_TARGET, ?candidate_hash, "Candidate marked as backed"); @@ -242,17 +243,17 @@ impl CandidateStorage { self.by_candidate_hash.contains_key(candidate_hash) } - /// Return an iterator over the stored candidates. + /// Return an iterator over references to the stored candidates, in arbitrary order. fn candidates(&self) -> impl Iterator { self.by_candidate_hash.values() } - /// Return an iterator over the stored candidates. + /// Consume self into an iterator over the stored candidates, in arbitrary order. pub fn into_candidates(self) -> impl Iterator { self.by_candidate_hash.into_values() } - /// Get head-data by hash. + /// Try getting head-data by hash. fn head_data_by_hash(&self, hash: &Hash) -> Option<&HeadData> { // First, search for candidates outputting this head data and extract the head data // from their commitments if they exist. 
@@ -273,10 +274,7 @@ impl CandidateStorage { }) } - /// Returns the candidates which have the given head data hash as parent. - /// We don't allow forks in a parachain, but we may have multiple candidates with same parent - /// across different relay chain forks. That's why it returns an iterator (but only one will be - /// valid and used in the end). + /// Returns the backed candidates which have the given head data hash as parent. fn possible_backed_para_children<'a>( &'a self, parent_head_hash: &'a Hash, @@ -311,6 +309,7 @@ enum CandidateState { } #[derive(Debug, Clone, PartialEq, Error)] +/// Possible errors when construcing a candidate entry. pub enum CandidateEntryError { #[error("Candidate does not match the persisted validation data provided alongside it")] PersistedValidationDataMismatch, @@ -319,6 +318,7 @@ pub enum CandidateEntryError { } #[derive(Debug, Clone)] +/// Representation of a candidate into the [`CandidateStorage`]. pub(crate) struct CandidateEntry { candidate_hash: CandidateHash, parent_head_data_hash: Hash, @@ -329,6 +329,7 @@ pub(crate) struct CandidateEntry { } impl CandidateEntry { + /// Create a new seconded candidate entry. pub fn new_seconded( candidate_hash: CandidateHash, candidate: CommittedCandidateReceipt, @@ -546,28 +547,43 @@ impl FragmentNode { } } -/// This is a chain of candidates based on some underlying storage of candidates and a scope. +/// This is the fragment chain specific to an active leaf. /// -/// All nodes in the chain must be either pending availability or within the scope. Within the scope -/// means it's built off of the relay-parent or an ancestor. +/// It holds the current best backable candidate chain, as well as potential candidates +/// which could become connected to the chain in the future or which could even overwrite the +/// existing chain. pub(crate) struct FragmentChain { + // The current scope, which dictates the on-chain operating constraints that all future + // candidates must adhere to. 
scope: Scope, + // The current best chain of backable candidates. It only contains candidates which build on + // top of each other and which have reached the backing quorum. In the presence of potential + // forks, this chain will pick a fork according to the `fork_selection_rule`. chain: Vec, + // A set of the candidate hashes in the `chain`. candidates: HashSet, // Index from head data hash to the candidate hash with that head data as a parent. + // Only contains the candidates present in the `chain`. by_parent_head: HashMap, - // Index from head data hash to candidate hashes outputting that head data. + // Index from head data hash to the candidate hash outputting that head data. + // Only contains the candidates present in the `chain`. by_output_head: HashMap, + // The potential candidate storage. Contains candidates which are not yet part of the `chain` + // but may become in the future. These can form any tree shape as well as contain any + // unconnected candidates for which we don't know the parent. unconnected: CandidateStorage, } impl FragmentChain { - /// Create a new [`FragmentChain`] with given scope and populated from the storage. - pub fn populate(scope: Scope, parent_candidates: &mut CandidateStorage) -> Self { + /// Create a new [`FragmentChain`] with given scope and populated from the given storage. + /// The `prev_storage` should contain the candidates of the `FragmentChain` at the previous + /// relay parent, as well as the candidates pending availability at this relay parent. + pub fn populate(scope: Scope, mut prev_storage: CandidateStorage) -> Self { + // Initialize as empty let mut fragment_chain = Self { scope, chain: Vec::new(), @@ -577,32 +593,35 @@ impl FragmentChain { unconnected: CandidateStorage::default(), }; - fragment_chain.populate_chain(parent_candidates); + // First populate the best backable chain. + fragment_chain.populate_chain(&mut prev_storage); - // Trim the forks that we know can no longer make it on-chain. 
- fragment_chain.trim_uneligible_forks(parent_candidates); + // Now that we picked the best backable chain, trim the forks generated by candidates which + // are not present in the best chain. + fragment_chain.trim_uneligible_forks(&mut prev_storage); - fragment_chain.populate_unconnected_potential_candidates(parent_candidates); + // Finally, keep any candidates which haven't been trimmed but still have potential. + fragment_chain.populate_unconnected_potential_candidates(prev_storage); fragment_chain } - /// Get the scope of the Fragment Chain. + /// Get the scope of the [`FragmentChain`]. pub fn scope(&self) -> &Scope { &self.scope } - /// Returns the number of candidates in the chain + /// Returns the number of candidates in the best backable chain. pub fn len(&self) -> usize { self.candidates.len() } - /// Whether the candidate exists. + /// Whether the candidate is part of the best backable chain. pub fn contains_candidate(&self, candidate: &CandidateHash) -> bool { self.candidates.contains(candidate) } - /// Whether the candidate exists. + /// Whether the candidate exists as part of the unconnected potential candidates. pub fn contains_unconnected_candidate(&self, candidate: &CandidateHash) -> bool { self.unconnected.contains(candidate) } @@ -612,7 +631,7 @@ impl FragmentChain { self.chain.iter().map(|candidate| candidate.candidate_hash).collect() } - /// Return a vector of the chain's candidate hashes, in-order. + /// Return a vector of the unconnected potential candidate hashes, in arbitrary order. pub fn unconnected(&self) -> impl Iterator { self.unconnected.candidates() } @@ -626,31 +645,34 @@ impl FragmentChain { ) } + /// Return a new [`CandidateStorage`] containing all the candidates from this `FragmentChain`, + /// as well as the unconnected ones. 
pub fn as_candidate_storage(&self) -> CandidateStorage { let mut storage = self.unconnected.clone(); for candidate in self.chain.iter() { - let Ok(()) = storage.add_candidate_entry(CandidateEntry { + let _ = storage.add_candidate_entry(CandidateEntry { candidate_hash: candidate.candidate_hash, parent_head_data_hash: candidate.parent_head_data_hash, output_head_data_hash: candidate.output_head_data_hash, relay_parent: candidate.relay_parent(), candidate: candidate.fragment.candidate_clone(), // This clone is very cheap. state: CandidateState::Backed, - }) else { - continue - }; + }); } storage } + /// Try getting the full head data associated with this hash. pub fn get_head_data_by_hash(&self, head_data_hash: &Hash) -> Option { + // First, see if this is the head data of the latest included candidate. let required_parent = &self.scope.base_constraints().required_parent; if &required_parent.hash() == head_data_hash { return Some(required_parent.clone()) } + // Cheaply check if the head data is in the best backable chain. let has_head_data_in_chain = self .by_parent_head .get(head_data_hash) @@ -676,6 +698,7 @@ impl FragmentChain { }); } + // Lastly, try getting the head data from the unconnected candidates. self.unconnected.head_data_by_hash(head_data_hash).cloned() } @@ -698,6 +721,8 @@ impl FragmentChain { let mut res = Vec::with_capacity(actual_end_index - base_pos); for elem in &self.chain[base_pos..actual_end_index] { + // Only supply candidates which are not yet pending availability. `ancestors` should + // have already contained them, but check just in case. if self.scope.get_pending_availability(&elem.candidate_hash).is_none() { res.push((elem.candidate_hash, elem.relay_parent())); } else { @@ -727,8 +752,8 @@ impl FragmentChain { self.chain.len() } - // Return the earliest relay parent a new candidate can have in order to be added to the chain. - // This is the relay parent of the last candidate in the chain. 
+ // Return the earliest relay parent a new candidate can have in order to be added to the chain + // right now. This is the relay parent of the last candidate in the chain. // The value returned may not be valid if we want to add a candidate pending availability, which // may have a relay parent which is out of scope. Special handling is needed in that case. // `None` is returned if the candidate's relay parent info cannot be found. @@ -746,6 +771,9 @@ impl FragmentChain { } } + // Return the earliest relay parent a potential candidate may have for it to ever be added to + // the chain. This is the relay parent of the last candidate pending availability or the + // earliest relay parent in scope. fn earliest_relay_parent_pending_availability(&self) -> RelayChainBlockInfo { self.chain .iter() @@ -758,11 +786,11 @@ impl FragmentChain { .unwrap_or_else(|| self.scope.earliest_relay_parent()) } - // Checks if this candidate could be added in the future to this chain. - // This assumes that the chain does not already contain this candidate. It may or may not be - // present in the `CandidateStorage`. - // Even if the candidate is a potential candidate, this function will indicate that it can be - // kept only if there's enough room for it. + /// Checks if this candidate could be added in the future to this chain. + /// This will return `Error::CandidateAlreadyKnown` if the candidate is alrady in the chain or + /// the unconnected candidate storage. It will return + /// `Error::CandidateAlreadyPendingAvailability` if the candidate is already pending + /// availability. pub fn can_add_candidate_as_potential( &self, candidate: &impl HypotheticalOrConcreteCandidate, @@ -780,6 +808,8 @@ impl FragmentChain { self.check_potential(candidate) } + /// Try adding a seconded candidate, if the candidate has potential. It will never be added to + /// the chain directly in the seconded state, it will only be part of the unconnected storage. 
pub fn try_adding_seconded_candidate( &mut self, candidate: &CandidateEntry, @@ -792,32 +822,25 @@ impl FragmentChain { if res.is_ok() { // This clone is cheap, as it uses an Arc for the expensive stuff. + // We can't consume the candidate because other fragment chains may use it also. self.unconnected.add_candidate_entry(candidate.clone())?; } res } - // The candidates which are present in `CandidateStorage`, are not part of this chain but could - // become part of this chain in the future. Capped at the max depth minus the existing chain - // length. - // If `ignore_candidate` is supplied and found in storage, it won't be counted. - fn populate_unconnected_potential_candidates(&mut self, old_storage: &CandidateStorage) { - for candidate in old_storage.candidates() { + // Populate the unconnected potential candidate storage starting from a previous storage. + fn populate_unconnected_potential_candidates(&mut self, old_storage: CandidateStorage) { + for candidate in old_storage.into_candidates() { // Sanity check, all pending availability candidates should be already present in the // chain. if self.scope.get_pending_availability(&candidate.candidate_hash).is_some() { continue } - let res = self.can_add_candidate_as_potential(&candidate); - - match res { + match self.can_add_candidate_as_potential(&&candidate) { Ok(()) => { - // This clone is cheap because the expensive stuff is wrapped in an Arc - let Ok(()) = self.unconnected.add_candidate_entry(candidate.clone()) else { - continue - }; + let _ = self.unconnected.add_candidate_entry(candidate); }, // Swallow these errors as they can legitimately happen when pruning stale // candidates. @@ -826,6 +849,8 @@ impl FragmentChain { } } + // Check whether a candidate outputting this head data would introduce a cycle or multiple paths + // to the same state. Trivial 0-length cycles are checked in `CandidateEntry::new`. 
fn check_cycles_or_invalid_tree(&self, output_head_hash: &Hash) -> Result<(), Error> { // this should catch a cycle where this candidate would point back to the parent of some // candidate in the chain. @@ -841,11 +866,9 @@ impl FragmentChain { Ok(()) } - // Checks the potential of a candidate to be added to the chain in the future. - // Verifies that the relay parent is in scope and not moving backwards and that we're not - // introducing forks or cycles with other candidates in the chain. - // `output_head_hash` is optional because we sometimes make this check before retrieving the - // collation. + // Checks the potential of a candidate to be added to the chain now or in the future. + // It works both with concrete candidates for which we have the full PVD and committed receipt, + // but also does some more basic checks for incomplete candidates (before even fetching them). fn check_potential( &self, candidate: &impl HypotheticalOrConcreteCandidate, @@ -860,14 +883,16 @@ impl FragmentChain { } } + // Check if the relay parent is in scope. let Some(relay_parent) = self.scope.ancestor(&relay_parent) else { return Err(Error::RelayParentNotInScope( relay_parent, self.scope.earliest_relay_parent().hash, )) }; + + // Check if the relay parent moved backwards from the latest candidate pending availability. let earliest_rp_of_pending_availability = self.earliest_relay_parent_pending_availability(); - // relay parent moved backwards. if relay_parent.number < earliest_rp_of_pending_availability.number { return Err(Error::RelayParentPrecedesCandidatePendingAvailability( relay_parent.hash, @@ -875,20 +900,22 @@ impl FragmentChain { )) } - // Check if it's a fork with a backed candidate. + // If it's a fork with a backed candidate in the current chain. if let Some(other_candidate) = self.by_parent_head.get(&parent_head_hash) { if self.scope().get_pending_availability(other_candidate).is_some() { // Cannot accept a fork with a candidate pending availability. 
return Err(Error::ForkWithCandidatePendingAvailability(*other_candidate)) } - // If the candidate is backed and in the current chain, accept only a candidate with - // a lower hash. + // If the candidate is backed and in the current chain, accept only a candidate + // according to the fork selection rul. if fork_selection_rule(other_candidate, &candidate.candidate_hash()) == Ordering::Less { return Err(Error::ForkChoiceRule(*other_candidate)) } } + // Try seeing if the parent candidate is in the current chain or if it is the latest + // included candidate. If so, get the constraints the candidate must satisfy. let constraints = if let Some(parent_candidate) = self.by_output_head.get(&parent_head_hash) { let Some(parent_candidate_index) = @@ -898,7 +925,8 @@ impl FragmentChain { return Err(Error::ParentCandidateNotFound) }; - if parent_candidate_index > self.scope.max_depth { + // We already have enough candidates in this chain. + if parent_candidate_index >= self.scope.max_depth { return Err(Error::ChainTooLong) } @@ -921,7 +949,7 @@ impl FragmentChain { self.check_cycles_or_invalid_tree(output_head_hash)?; } - // We do additional checks for complete candidates. + // Check against constraints if we have a full concrete candidate. if let (Some(commitments), Some(pvd), Some(validation_code_hash)) = ( candidate.commitments(), candidate.persisted_validation_data(), @@ -935,24 +963,36 @@ impl FragmentChain { pvd, ) .map_err(Error::CheckAgainstConstraints)?; - // TODO: make sure this checks the relay parent to not move backwards. - } else if relay_parent.number < constraints.min_relay_parent_number { - // Otherwise, at least check the relay parent progresses. 
+ } + + if relay_parent.number < constraints.min_relay_parent_number { return Err(Error::RelayParentMovedBackwards) } + if let Some(earliest_rp) = self.earliest_relay_parent() { + if relay_parent.number < earliest_rp.number { + return Err(Error::RelayParentMovedBackwards) + } + } + Ok(()) } + // Once the backable chain was populated, trim the forks generated by candidates which + // are not present in the best chain. Fan out this into a full breadth-first search. fn trim_uneligible_forks(&self, storage: &mut CandidateStorage) { + // Start out with the candidates in the chain. They are all valid candidates. let mut queue: VecDeque<_> = self.chain.iter().map(|c| (c.parent_head_data_hash, true)).collect(); + // To make sure that cycles don't make us loop forever, keep track of the visited parent + // heads. let mut visited = HashSet::new(); while let Some((parent, parent_has_potential)) = queue.pop_front() { visited.insert(parent); let Some(children) = storage.by_parent_head.get(&parent) else { continue }; + // Cannot remove while iterating so store them here temporarily. let mut to_remove = vec![]; for child_hash in children.iter() { @@ -964,9 +1004,14 @@ impl FragmentChain { continue } + // Only keep a candidate if its full ancestry was already kept as potential and this + // candidate itself has potential. if parent_has_potential && self.check_potential(&child).is_ok() { queue.push_back((child.output_head_data_hash, true)); } else { + // Otherwise, remove this candidate and continue looping for its children, but + // mark the parent's potential as `false`. We only want to remove its + // children. to_remove.push(*child_hash); queue.push_back((child.output_head_data_hash, false)); } @@ -978,10 +1023,10 @@ impl FragmentChain { } } - // Populate the fragment chain with candidates from CandidateStorage. - // Can be called by the constructor or when introducing a new candidate. 
- // If we're introducing a new candidate onto an existing chain, we may introduce more than one, - // since we may connect already existing candidates to the chain. + // Populate the fragment chain with candidates from the supplied `CandidateStorage`. + // Can be called by the constructor or when backing a new candidate. + // When this is called, it may cause a the previous chain to be completely erased or it may add + // more than one candidate. fn populate_chain(&mut self, storage: &mut CandidateStorage) { let mut cumulative_modifications = if let Some(last_candidate) = self.chain.last() { last_candidate.cumulative_modifications.clone() @@ -1011,13 +1056,13 @@ impl FragmentChain { }; let required_head_hash = child_constraints.required_parent.hash(); - // Even though we don't allow parachain forks under the same active leaf, they may still - // appear under different relay chain forks, hence the iterator below. + // Select the few possible backed/backable children which can be added to the chain + // right now. let possible_children = storage .possible_backed_para_children(&required_head_hash) .filter_map(|candidate| { - // Add one node to chain if + // Only select a candidates if: // 1. it does not introduce a fork or a cycle. // 2. parent hash is correct. // 3. relay-parent does not move backwards. @@ -1099,6 +1144,7 @@ impl FragmentChain { )) }); + // Choose the best candidate. let best_candidate = possible_children.min_by(|(_, ref child1, _, _), (_, ref child2, _, _)| { // Always pick a candidate pending availability as best. @@ -1115,6 +1161,7 @@ impl FragmentChain { if let Some((fragment, candidate_hash, output_head_data_hash, parent_head_data_hash)) = best_candidate { + // Remove the candidate from storage. storage.remove_candidate(&candidate_hash); // Update the cumulative constraint modifications. @@ -1130,9 +1177,9 @@ impl FragmentChain { cumulative_modifications: cumulative_modifications.clone(), }; + // Add the candidate to the chain now. 
self.chain.push(node); self.candidates.insert(candidate_hash); - // We've already checked for forks and cycles. self.by_parent_head.insert(parent_head_data_hash, candidate_hash); self.by_output_head.insert(output_head_data_hash, candidate_hash); } else { @@ -1141,13 +1188,18 @@ impl FragmentChain { } } + /// Mark a candidate as backed. Return `None` if the candidate is not part of the unconnected + /// storage. + /// This will trigger a recreation of the best backable chain. pub fn candidate_backed(&self, newly_backed_candidate: &CandidateHash) -> Option { + // Get the storage containing both the backable chain and the unconnected candidates. let mut old_storage = self.as_candidate_storage(); if !old_storage.mark_backed(newly_backed_candidate) { return None } - Some(Self::populate(self.scope.clone(), &mut old_storage)) + // Repopulate. + Some(Self::populate(self.scope.clone(), old_storage)) } } diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index 61b7757e979c..9728789de150 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -339,7 +339,7 @@ async fn handle_active_leaves_update( } // Finally, populate the fragment chain. 
- let chain = FragmentChain::populate(scope, &mut new_storage); + let chain = FragmentChain::populate(scope, new_storage); gum::trace!( target: LOG_TARGET, From 2993cd7afd501ceba5d5e07f0c301fd40d544e2c Mon Sep 17 00:00:00 2001 From: alindima Date: Fri, 5 Jul 2024 17:38:59 +0300 Subject: [PATCH 11/56] more comments --- .../src/fragment_chain/mod.rs | 92 +++++++++++++------ 1 file changed, 62 insertions(+), 30 deletions(-) diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs index 5cad5722119d..98b96dc0449a 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs @@ -18,41 +18,56 @@ //! //! # Overview //! -//! This module exposes two main types: [`FragmentChain`] and [`CandidateStorage`] which are meant -//! to be used in close conjunction. Each fragment chain is associated with a particular -//! relay-parent and each node in the chain represents a candidate. Each parachain has a single -//! candidate storage, but can have one chain for each relay chain block in the view. -//! Therefore, the same candidate can be present in multiple fragment chains of a parachain. One of -//! the purposes of the candidate storage is to deduplicate the large candidate data that is being -//! referenced from multiple fragment chains. +//! The main type exposed by this module is the [`FragmentChain`]. //! -//! A chain has an associated [`Scope`] which defines limits on candidates within the chain. -//! Candidates themselves have their own [`Constraints`] which are either the constraints from the -//! scope, or, if there are previous nodes in the chain, a modified version of the previous -//! candidate's constraints. +//! Each fragment chain is associated with a particular relay-parent (an active leaf) and has a +//! 
[`Scope`], which contains the allowed relay parents (up to `allowed_ancestry_len`), the pending +//! availability candidates and base constraints derived from the latest included candidate. Each +//! parachain has a single `FragmentChain` for each active leaf where it's scheduled. //! -//! Another use of the `CandidateStorage` is to keep a record of candidates which may not be yet -//! included in any chain, but which may become part of a chain in the future. This is needed for -//! elastic scaling, so that we may parallelise the backing process across different groups. As long -//! as some basic constraints are not violated by an unconnected candidate (like the relay parent -//! being in scope), we proceed with the backing process, hoping that its predecessors will be -//! backed soon enough. This is commonly called a potential candidate. Note that not all potential -//! candidates will be maintained in the CandidateStorage. The total number of connected + potential -//! candidates will be at most max_candidate_depth + 1. +//! A fragment chain consists mainly of the current best backable chain (we'll call this the best +//! chain) and a storage of unconnected potential candidates (we'll call this the unconnected +//! storage). +//! +//! The best chain contains all the candidates pending availability and a subsequent chain +//! of candidates that have reached the backing quorum and are better than any other backable forks +//! according to the fork selection rule (more on this rule later). It has a length of size at most +//! `max_candidate_depth + 1`. +//! +//! The unconnected storage keeps a record of seconded/backable candidates that may be +//! added to the best chain in the future. +//! Once a candidate is seconded, it becomes part of this unconnected storage. +//! Only after it is backed it may be added to the best chain (but not neccessarily). It's only +//! added if it builds on the latest candidate in the chain and if there isn't a better backable +//! 
candidate according to the fork selection rule. +//! +//! An important thing to note is that the candidates present in the unconnected storage may have +//! any/no relationship between them. In other words, they may form N trees and may even form +//! cycles. This is needed so that we may begin validating candidates for which we don't yet know +//! their parent (so we may parallelise the backing process across different groups for elastic +//! scaling) and so that we accept parachain forks. +//! +//! We accept parachain forks only until reaching the backing quorum. After that, we assume all +//! validators pick the same fork accroding to the fork selection rule. If we decided to not accept +//! parachain forks, candidates could end up getting only half of the backing votes or even less +//! (for forks of larger arity). This would affect the validator rewards. Still, we don't guarantee +//! that a fork-producing parachains will be able to fully use elastic scaling. +//! +//! Once a candidate is backed and becomes part of the best chain, we can trim from the +//! unconnected storage candidates which constitute forks on the best chain and no longer have +//! potential. //! //! This module also makes use of types provided by the Inclusion Emulator module, such as //! [`Fragment`] and [`Constraints`]. These perform the actual job of checking for validity of //! prospective fragments. //! -//! # Parachain forks +//! # Fork choice rule //! -//! Parachains are expected to not create forks, hence the use of fragment chains as opposed to -//! fragment trees. If parachains do create forks, their performance in regards to async backing and -//! elastic scaling will suffer, because different validators will have different views of the -//! future. +//! The motivation for the fork choice rule is described in the previous chapter. //! -//! This is a compromise we can make - collators which want to use async backing and elastic scaling -//! 
need to cooperate for the highest throughput. +//! The current rule is: choose the candidate with the lower candidate hash. +//! The candidate hash is quite random and finding a candidate with a lower hash in order to favour +//! it would essentially mean solving a proof of work problem. //! //! # Parachain cycles //! @@ -65,23 +80,40 @@ //! resolved by having candidates reference their parent by candidate hash. //! //! However, dealing with cycles increases complexity during the backing/inclusion process for no -//! practical reason. Therefore, fragment chains will not accept such candidates. +//! practical reason. +//! These cycles may be accepted by fragment chains while candidates are part of the unconnected +//! storage, but they will definitely not make it to the best chain. //! //! On the other hand, enforcing that a parachain will NEVER be acyclic would be very complicated //! (looping through the entire parachain's history on every new candidate or changing the candidate //! receipt to reference the parent's candidate hash). //! +//! Therefore, we don't provide a guarantee that a cycle-producing parachain will work (although in +//! practice they probably will if the cycle length is larger than the number of assigned cores +//! multiplied by two). +//! //! # Spam protection //! -//! As long as the [`CandidateStorage`] has bounded input on the number of candidates supplied, -//! [`FragmentChain`] complexity is bounded. This means that higher-level code needs to be selective -//! about limiting the amount of candidates that are considered. +//! As long as the supplied number of candidates is bounded, [`FragmentChain`] complexity is +//! bounded. This means that higher-level code needs to be selective about limiting the amount of +//! candidates that are considered. +//! +//! Practically speaking, the collator-protocol will not allow more than `max_candidate_depth + 1` +//! 
collations to be fetched at a relay parent and statement-distribution will not allow more than +//! `max_candidate_depth + 1` seconded candidates at a relay parent per each validator in the +//! backing group. Considering the `allowed_ancestry_len` configuration value, the number of +//! candidates in a `FragmentChain` (including its unconnected storage) should not exceed: +//! +//! `allowed_ancestry_len * (max_candidate_depth + 1) * backing_group_size`. //! //! The code in this module is not designed for speed or efficiency, but conceptual simplicity. //! Our assumption is that the amount of candidates and parachains we consider will be reasonably //! bounded and in practice will not exceed a few thousand at any time. This naive implementation //! will still perform fairly well under these conditions, despite being somewhat wasteful of //! memory. +//! +//! Still, the expensive candidate data (CandidateCommitments) are wrapped in an `Arc` and shared +//! across fragment chains of the same para on different active leaves. #[cfg(test)] mod tests; From a8ee808a00bc21c01c8a11b09a2e68265e65f087 Mon Sep 17 00:00:00 2001 From: alindima Date: Tue, 9 Jul 2024 10:14:59 +0300 Subject: [PATCH 12/56] update inclusion emulator docs --- .../src/inclusion_emulator/mod.rs | 54 ++++--------------- 1 file changed, 9 insertions(+), 45 deletions(-) diff --git a/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs b/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs index cb16369a9efe..3df0836ccf04 100644 --- a/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs +++ b/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs @@ -40,8 +40,8 @@ use polkadot_node_subsystem::messages::HypotheticalCandidate; /// /// # Usage /// -/// It's expected that the users of this module will be building up chains of -/// [`Fragment`]s and consistently pruning and adding to the chains. 
+/// It's expected that the users of this module will be building up chains or trees of
+/// [`Fragment`]s and consistently pruning and adding to them.
 ///
 /// ## Operating Constraints
 ///
@@ -57,55 +57,19 @@ use polkadot_node_subsystem::messages::HypotheticalCandidate;
 ///
 /// ## Fragment Chains
 ///
-/// For simplicity and practicality, we expect that collators of the same parachain are
-/// cooperating and don't create parachain forks or cycles on the same relay chain active leaf.
-/// Therefore, higher-level code should maintain one fragment chain for each active leaf (not a
-/// fragment tree). If parachains do create forks, their performance in regards to async
-/// backing and elastic scaling will suffer, because different validators will have different
-/// predictions of the future.
+/// For the sake of this module, we don't care how higher-level code is managing parachain
+/// fragments, whether or not they're kept as a chain or tree. In reality,
+/// prospective-parachains is maintaining for every active leaf, a chain of the "best" backable
+/// candidates and a storage of potential candidates which may be added to this chain in the
+/// future.
 ///
 /// As the relay-chain grows, some predictions come true and others come false.
-/// And new predictions get made. These three changes correspond distinctly to the
-/// 3 primary operations on fragment chains.
+/// And new predictions get made. Higher-level code is responsible for adding and pruning the
+/// fragment chains.
 ///
 /// Avoiding fragment-chain blowup is beyond the scope of this module. Higher-level must ensure
 /// proper spam protection.
 ///
-/// ### Pruning Fragment Chains
-///
-/// When the relay-chain advances, we want to compare the new constraints of that relay-parent
-/// to the root of the fragment chain we have. There are 3 cases:
-///
-/// 1. The root fragment is still valid under the new constraints. In this case, we do nothing.
-/// This is the "prediction still uncertain" case. 
(Corresponds to some candidates still -/// being pending availability). -/// -/// 2. The root fragment (potentially along with a number of descendants) is invalid under the -/// new constraints because it has been included by the relay-chain. In this case, we can -/// discard the included chain and split & re-root the chain under its descendants and -/// compare to the new constraints again. This is the "prediction came true" case. -/// -/// 3. The root fragment becomes invalid under the new constraints for any reason (if for -/// example the parachain produced a fork and the block producer picked a different -/// candidate to back). In this case we can discard the entire fragment chain. This is the -/// "prediction came false" case. -/// -/// This is all a bit of a simplification because it assumes that the relay-chain advances -/// without forks and is finalized instantly. In practice, the set of fragment-chains needs to -/// be observable from the perspective of a few different possible forks of the relay-chain and -/// not pruned too eagerly. -/// -/// Note that the fragments themselves don't need to change and the only thing we care about -/// is whether the predictions they represent are still valid. -/// -/// ### Extending Fragment Chains -/// -/// As predictions fade into the past, new ones should be stacked on top. -/// -/// Every new relay-chain block is an opportunity to make a new prediction about the future. -/// Higher-level logic should decide whether to build upon an existing chain or whether -/// to create a new fragment-chain. -/// /// ### Code Upgrades /// /// Code upgrades are the main place where this emulation fails. 
The on-chain PVF upgrade From 49634ea0464cbfcd1f3c1b750383be9b80df977d Mon Sep 17 00:00:00 2001 From: alindima Date: Tue, 9 Jul 2024 10:44:49 +0300 Subject: [PATCH 13/56] metrics --- .../src/fragment_chain/mod.rs | 6 +- .../core/prospective-parachains/src/lib.rs | 28 +++++++- .../prospective-parachains/src/metrics.rs | 68 +++++++++++-------- 3 files changed, 69 insertions(+), 33 deletions(-) diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs index 98b96dc0449a..140ddc206180 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs @@ -323,7 +323,6 @@ impl CandidateStorage { }) } - #[cfg(test)] fn len(&self) -> usize { self.by_candidate_hash.len() } @@ -648,6 +647,11 @@ impl FragmentChain { self.candidates.len() } + /// Returns the number of candidates in unconnected potential storage. + pub fn unconnected_len(&self) -> usize { + self.unconnected.len() + } + /// Whether the candidate is part of the best backable chain. 
pub fn contains_candidate(&self, candidate: &CandidateHash) -> bool { self.candidates.contains(candidate) diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index 9728789de150..28ca198bfff9 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -141,7 +141,7 @@ async fn run_iteration( ProspectiveParachainsMessage::IntroduceSecondedCandidate(request, tx) => handle_introduce_seconded_candidate(view, request, tx, metrics).await, ProspectiveParachainsMessage::CandidateBacked(para, candidate_hash) => - handle_candidate_backed(view, para, candidate_hash).await, + handle_candidate_backed(view, para, candidate_hash, metrics).await, ProspectiveParachainsMessage::GetBackableCandidates( relay_parent, para, @@ -165,7 +165,7 @@ async fn handle_active_leaves_update( ctx: &mut Context, view: &mut View, update: ActiveLeavesUpdate, - _metrics: &Metrics, + metrics: &Metrics, ) -> JfyiErrorResult<()> { // For each active leaf: // - determine the scheduled paras @@ -176,6 +176,8 @@ async fn handle_active_leaves_update( // Only then, clean up inactive leaves. They must be cleaned only after new leaves are // processed, because we may reuse their candidates. 
+ let _timer = metrics.time_handle_active_leaves_update(); + let mut temp_header_cache = HashMap::new(); for activated in update.activated.into_iter() { let hash = activated.hash; @@ -368,6 +370,19 @@ async fn handle_active_leaves_update( view.active_leaves.remove(deactivated); } + if metrics.0.is_some() { + let mut connected = 0; + let mut unconnected = 0; + for RelayBlockViewData { fragment_chains } in view.active_leaves.values() { + for chain in fragment_chains.values() { + connected += chain.len(); + unconnected += chain.unconnected_len(); + } + } + + metrics.record_candidate_count(connected as u64, unconnected as u64); + } + Ok(()) } @@ -531,7 +546,14 @@ async fn handle_introduce_seconded_candidate( let _ = tx.send(added); } -async fn handle_candidate_backed(view: &mut View, para: ParaId, candidate_hash: CandidateHash) { +async fn handle_candidate_backed( + view: &mut View, + para: ParaId, + candidate_hash: CandidateHash, + metrics: &Metrics, +) { + let _timer = metrics.time_candidate_backed(); + let mut found_candidate = false; let mut found_para = false; for (leaf, leaf_data) in view.active_leaves.iter_mut() { diff --git a/polkadot/node/core/prospective-parachains/src/metrics.rs b/polkadot/node/core/prospective-parachains/src/metrics.rs index 5abd9f56f306..5708e0b54308 100644 --- a/polkadot/node/core/prospective-parachains/src/metrics.rs +++ b/polkadot/node/core/prospective-parachains/src/metrics.rs @@ -22,10 +22,11 @@ use polkadot_node_subsystem_util::metrics::{ #[derive(Clone)] pub(crate) struct MetricsInner { - prune_view_candidate_storage: prometheus::Histogram, - introduce_seconded_candidate: prometheus::Histogram, - hypothetical_membership: prometheus::Histogram, - candidate_storage_count: prometheus::GaugeVec, + time_active_leaves_update: prometheus::Histogram, + time_introduce_seconded_candidate: prometheus::Histogram, + time_candidate_backed: prometheus::Histogram, + time_hypothetical_membership: prometheus::Histogram, + candidate_count: 
prometheus::GaugeVec, } /// Candidate backing metrics. @@ -33,13 +34,11 @@ pub(crate) struct MetricsInner { pub struct Metrics(pub(crate) Option); impl Metrics { - /// Provide a timer for handling `prune_view_candidate_storage` which observes on drop. - pub fn time_prune_view_candidate_storage( + /// Provide a timer for handling `ActiveLeavesUpdate` which observes on drop. + pub fn time_handle_active_leaves_update( &self, ) -> Option { - self.0 - .as_ref() - .map(|metrics| metrics.prune_view_candidate_storage.start_timer()) + self.0.as_ref().map(|metrics| metrics.time_active_leaves_update.start_timer()) } /// Provide a timer for handling `IntroduceSecondedCandidate` which observes on drop. @@ -48,29 +47,33 @@ impl Metrics { ) -> Option { self.0 .as_ref() - .map(|metrics| metrics.introduce_seconded_candidate.start_timer()) + .map(|metrics| metrics.time_introduce_seconded_candidate.start_timer()) + } + + /// Provide a timer for handling `CandidateBacked` which observes on drop. + pub fn time_candidate_backed(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.time_candidate_backed.start_timer()) } /// Provide a timer for handling `GetHypotheticalMembership` which observes on drop. pub fn time_hypothetical_membership_request( &self, ) -> Option { - self.0.as_ref().map(|metrics| metrics.hypothetical_membership.start_timer()) + self.0 + .as_ref() + .map(|metrics| metrics.time_hypothetical_membership.start_timer()) } - /// Record the size of the candidate storage. First param is the connected candidates count, - /// second param is the unconnected candidates count. - pub fn record_candidate_storage_size(&self, connected_count: u64, unconnected_count: u64) { + /// Record number of candidates across all fragment chains. First param is the connected + /// candidates count, second param is the unconnected candidates count. 
+ pub fn record_candidate_count(&self, connected_count: u64, unconnected_count: u64) { self.0.as_ref().map(|metrics| { - metrics - .candidate_storage_count - .with_label_values(&["connected"]) - .set(connected_count) + metrics.candidate_count.with_label_values(&["connected"]).set(connected_count) }); self.0.as_ref().map(|metrics| { metrics - .candidate_storage_count + .candidate_count .with_label_values(&["unconnected"]) .set(unconnected_count) }); @@ -80,32 +83,39 @@ impl Metrics { impl metrics::Metrics for Metrics { fn try_register(registry: &prometheus::Registry) -> Result { let metrics = MetricsInner { - prune_view_candidate_storage: prometheus::register( + time_active_leaves_update: prometheus::register( prometheus::Histogram::with_opts(prometheus::HistogramOpts::new( - "polkadot_parachain_prospective_parachains_prune_view_candidate_storage", - "Time spent within `prospective_parachains::prune_view_candidate_storage`", + "polkadot_parachain_prospective_parachains_time_active_leaves_update", + "Time spent within `prospective_parachains::handle_active_leaves_update`", ))?, registry, )?, - introduce_seconded_candidate: prometheus::register( + time_introduce_seconded_candidate: prometheus::register( prometheus::Histogram::with_opts(prometheus::HistogramOpts::new( - "polkadot_parachain_prospective_parachains_introduce_seconded_candidate", + "polkadot_parachain_prospective_parachains_time_introduce_seconded_candidate", "Time spent within `prospective_parachains::handle_introduce_seconded_candidate`", ))?, registry, )?, - hypothetical_membership: prometheus::register( + time_candidate_backed: prometheus::register( + prometheus::Histogram::with_opts(prometheus::HistogramOpts::new( + "polkadot_parachain_prospective_parachains_time_candidate_backed", + "Time spent within `prospective_parachains::handle_candidate_backed`", + ))?, + registry, + )?, + time_hypothetical_membership: prometheus::register( prometheus::Histogram::with_opts(prometheus::HistogramOpts::new( - 
"polkadot_parachain_prospective_parachains_hypothetical_membership", + "polkadot_parachain_prospective_parachains_time_hypothetical_membership", "Time spent responding to `GetHypotheticalMembership`", ))?, registry, )?, - candidate_storage_count: prometheus::register( + candidate_count: prometheus::register( GaugeVec::new( Opts::new( - "polkadot_parachain_prospective_parachains_candidate_storage_count", - "Number of candidates present in the candidate storage, split by connected and unconnected" + "polkadot_parachain_prospective_parachains_candidate_count", + "Number of candidates present across all fragment chains, split by connected and unconnected" ), &["type"], )?, From 2ab16a85df9618ec5238c1b25e1b83127705dd27 Mon Sep 17 00:00:00 2001 From: alindima Date: Tue, 9 Jul 2024 11:23:08 +0300 Subject: [PATCH 14/56] rename chain to best_chain and group it in a struct for better readability --- .../src/fragment_chain/mod.rs | 125 ++++++++++-------- .../core/prospective-parachains/src/lib.rs | 10 +- 2 files changed, 75 insertions(+), 60 deletions(-) diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs index 140ddc206180..1727ca4c61cd 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs @@ -578,6 +578,37 @@ impl FragmentNode { } } +/// A candidate chain of backed/backable candidates. +/// Includes the candidates pending availability and candidates which may be backed on-chain. +#[derive(Default)] +struct BackedChain { + // Holds the candidate chain. + chain: Vec, + // Index from head data hash to the candidate hash with that head data as a parent. + // Only contains the candidates present in the `chain`. + by_parent_head: HashMap, + // Index from head data hash to the candidate hash outputting that head data. + // Only contains the candidates present in the `chain`. 
+ by_output_head: HashMap, + // A set of the candidate hashes in the `chain`. + candidates: HashSet, +} + +impl BackedChain { + fn push(&mut self, candidate: FragmentNode) { + self.candidates.insert(candidate.candidate_hash); + self.by_parent_head + .insert(candidate.parent_head_data_hash, candidate.candidate_hash); + self.by_output_head + .insert(candidate.output_head_data_hash, candidate.candidate_hash); + self.chain.push(candidate); + } + + fn contains(&self, hash: &CandidateHash) -> bool { + self.candidates.contains(hash) + } +} + /// This is the fragment chain specific to an active leaf. /// /// It holds the current best backable candidate chain, as well as potential candidates @@ -591,17 +622,7 @@ pub(crate) struct FragmentChain { // The current best chain of backable candidates. It only contains candidates which build on // top of each other and which have reached the backing quorum. In the presence of potential // forks, this chain will pick a fork according to the `fork_selection_rule`. - chain: Vec, - - // A set of the candidate hashes in the `chain`. - candidates: HashSet, - - // Index from head data hash to the candidate hash with that head data as a parent. - // Only contains the candidates present in the `chain`. - by_parent_head: HashMap, - // Index from head data hash to the candidate hash outputting that head data. - // Only contains the candidates present in the `chain`. - by_output_head: HashMap, + best_chain: BackedChain, // The potential candidate storage. Contains candidates which are not yet part of the `chain` // but may become in the future. 
These can form any tree shape as well as contain any @@ -617,10 +638,7 @@ impl FragmentChain { // Initialize as empty let mut fragment_chain = Self { scope, - chain: Vec::new(), - candidates: HashSet::new(), - by_parent_head: HashMap::new(), - by_output_head: HashMap::new(), + best_chain: BackedChain::default(), unconnected: CandidateStorage::default(), }; @@ -643,8 +661,8 @@ impl FragmentChain { } /// Returns the number of candidates in the best backable chain. - pub fn len(&self) -> usize { - self.candidates.len() + pub fn best_chain_len(&self) -> usize { + self.best_chain.chain.len() } /// Returns the number of candidates in unconnected potential storage. @@ -652,19 +670,14 @@ impl FragmentChain { self.unconnected.len() } - /// Whether the candidate is part of the best backable chain. - pub fn contains_candidate(&self, candidate: &CandidateHash) -> bool { - self.candidates.contains(candidate) - } - /// Whether the candidate exists as part of the unconnected potential candidates. pub fn contains_unconnected_candidate(&self, candidate: &CandidateHash) -> bool { self.unconnected.contains(candidate) } /// Return a vector of the chain's candidate hashes, in-order. - pub fn to_vec(&self) -> Vec { - self.chain.iter().map(|candidate| candidate.candidate_hash).collect() + pub fn best_chain_vec(&self) -> Vec { + self.best_chain.chain.iter().map(|candidate| candidate.candidate_hash).collect() } /// Return a vector of the unconnected potential candidate hashes, in arbitrary order. @@ -674,7 +687,7 @@ impl FragmentChain { /// Return whether this candidate is backed in this chain or the unconnected storage. 
pub fn is_candidate_backed(&self, hash: &CandidateHash) -> bool { - self.candidates.contains(hash) || + self.best_chain.candidates.contains(hash) || matches!( self.unconnected.by_candidate_hash.get(hash), Some(candidate) if candidate.state == CandidateState::Backed @@ -686,7 +699,7 @@ impl FragmentChain { pub fn as_candidate_storage(&self) -> CandidateStorage { let mut storage = self.unconnected.clone(); - for candidate in self.chain.iter() { + for candidate in self.best_chain.chain.iter() { let _ = storage.add_candidate_entry(CandidateEntry { candidate_hash: candidate.candidate_hash, parent_head_data_hash: candidate.parent_head_data_hash, @@ -710,13 +723,14 @@ impl FragmentChain { // Cheaply check if the head data is in the best backable chain. let has_head_data_in_chain = self + .best_chain .by_parent_head .get(head_data_hash) - .or_else(|| self.by_output_head.get(head_data_hash)) + .or_else(|| self.best_chain.by_output_head.get(head_data_hash)) .is_some(); if has_head_data_in_chain { - return self.chain.iter().find_map(|candidate| { + return self.best_chain.chain.iter().find_map(|candidate| { if &candidate.parent_head_data_hash == head_data_hash { Some( candidate @@ -753,10 +767,11 @@ impl FragmentChain { } let base_pos = self.find_ancestor_path(ancestors); - let actual_end_index = std::cmp::min(base_pos + (count as usize), self.chain.len()); + let actual_end_index = + std::cmp::min(base_pos + (count as usize), self.best_chain.chain.len()); let mut res = Vec::with_capacity(actual_end_index - base_pos); - for elem in &self.chain[base_pos..actual_end_index] { + for elem in &self.best_chain.chain[base_pos..actual_end_index] { // Only supply candidates which are not yet pending availability. `ancestors` should // have already contained them, but check just in case. 
if self.scope.get_pending_availability(&elem.candidate_hash).is_none() { @@ -773,11 +788,11 @@ impl FragmentChain { // Stops when the ancestors are all used or when a node in the chain is not present in the // ancestor set. Returns the index in the chain were the search stopped. fn find_ancestor_path(&self, mut ancestors: Ancestors) -> usize { - if self.chain.is_empty() { + if self.best_chain.chain.is_empty() { return 0; } - for (index, candidate) in self.chain.iter().enumerate() { + for (index, candidate) in self.best_chain.chain.iter().enumerate() { if !ancestors.remove(&candidate.candidate_hash) { return index } @@ -785,7 +800,7 @@ impl FragmentChain { // This means that we found the entire chain in the ancestor set. There won't be anything // left to back. - self.chain.len() + self.best_chain.chain.len() } // Return the earliest relay parent a new candidate can have in order to be added to the chain @@ -794,7 +809,7 @@ impl FragmentChain { // may have a relay parent which is out of scope. Special handling is needed in that case. // `None` is returned if the candidate's relay parent info cannot be found. fn earliest_relay_parent(&self) -> Option { - if let Some(last_candidate) = self.chain.last() { + if let Some(last_candidate) = self.best_chain.chain.last() { self.scope.ancestor(&last_candidate.relay_parent()).or_else(|| { // if the relay-parent is out of scope _and_ it is in the chain, // it must be a candidate pending availability. @@ -811,7 +826,8 @@ impl FragmentChain { // the chain. This is the relay parent of the last candidate pending availability or the // earliest relay parent in scope. 
fn earliest_relay_parent_pending_availability(&self) -> RelayChainBlockInfo { - self.chain + self.best_chain + .chain .iter() .rev() .find_map(|candidate| { @@ -837,7 +853,7 @@ impl FragmentChain { return Err(Error::CandidateAlreadyPendingAvailability) } - if self.candidates.contains(&candidate_hash) || self.unconnected.contains(&candidate_hash) { + if self.best_chain.contains(&candidate_hash) || self.unconnected.contains(&candidate_hash) { return Err(Error::CandidateAlreadyKnown) } @@ -890,12 +906,12 @@ impl FragmentChain { fn check_cycles_or_invalid_tree(&self, output_head_hash: &Hash) -> Result<(), Error> { // this should catch a cycle where this candidate would point back to the parent of some // candidate in the chain. - if self.by_parent_head.contains_key(output_head_hash) { + if self.best_chain.by_parent_head.contains_key(output_head_hash) { return Err(Error::Cycle) } // multiple paths to the same state, which can't happen for a chain. - if self.by_output_head.contains_key(output_head_hash) { + if self.best_chain.by_output_head.contains_key(output_head_hash) { return Err(Error::MultiplePaths) } @@ -937,7 +953,7 @@ impl FragmentChain { } // If it's a fork with a backed candidate in the current chain. - if let Some(other_candidate) = self.by_parent_head.get(&parent_head_hash) { + if let Some(other_candidate) = self.best_chain.by_parent_head.get(&parent_head_hash) { if self.scope().get_pending_availability(other_candidate).is_some() { // Cannot accept a fork with a candidate pending availability. return Err(Error::ForkWithCandidatePendingAvailability(*other_candidate)) @@ -952,10 +968,11 @@ impl FragmentChain { // Try seeing if the parent candidate is in the current chain or if it is the latest // included candidate. If so, get the constraints the candidate must satisfy. 
- let constraints = if let Some(parent_candidate) = self.by_output_head.get(&parent_head_hash) + let constraints = if let Some(parent_candidate) = + self.best_chain.by_output_head.get(&parent_head_hash) { let Some(parent_candidate_index) = - self.chain.iter().position(|c| &c.candidate_hash == parent_candidate) + self.best_chain.chain.iter().position(|c| &c.candidate_hash == parent_candidate) else { // Should never really happen. return Err(Error::ParentCandidateNotFound) @@ -966,7 +983,7 @@ impl FragmentChain { return Err(Error::ChainTooLong) } - let parent_candidate = &self.chain[parent_candidate_index]; + let parent_candidate = &self.best_chain.chain[parent_candidate_index]; self.scope .base_constraints .apply_modifications(&parent_candidate.cumulative_modifications) @@ -1019,7 +1036,7 @@ impl FragmentChain { fn trim_uneligible_forks(&self, storage: &mut CandidateStorage) { // Start out with the candidates in the chain. They are all valid candidates. let mut queue: VecDeque<_> = - self.chain.iter().map(|c| (c.parent_head_data_hash, true)).collect(); + self.best_chain.chain.iter().map(|c| (c.parent_head_data_hash, true)).collect(); // To make sure that cycles don't make us loop forever, keep track of the visited parent // heads. let mut visited = HashSet::new(); @@ -1064,15 +1081,16 @@ impl FragmentChain { // When this is called, it may cause a the previous chain to be completely erased or it may add // more than one candidate. 
fn populate_chain(&mut self, storage: &mut CandidateStorage) { - let mut cumulative_modifications = if let Some(last_candidate) = self.chain.last() { - last_candidate.cumulative_modifications.clone() - } else { - ConstraintModifications::identity() - }; + let mut cumulative_modifications = + if let Some(last_candidate) = self.best_chain.chain.last() { + last_candidate.cumulative_modifications.clone() + } else { + ConstraintModifications::identity() + }; let Some(mut earliest_rp) = self.earliest_relay_parent() else { return }; loop { - if self.chain.len() > self.scope.max_depth { + if self.best_chain.chain.len() > self.scope.max_depth { break; } @@ -1125,7 +1143,7 @@ impl FragmentChain { // when the parent is a pending availability candidate as well, but // only other pending candidates can have a relay parent out of scope. let min_relay_parent_number = pending - .map(|p| match self.chain.len() { + .map(|p| match self.best_chain.chain.len() { 0 => p.relay_parent.number, _ => earliest_rp.number, }) @@ -1138,7 +1156,7 @@ impl FragmentChain { // don't add candidates if they're already present in the chain. // this can never happen, as candidates can only be duplicated if there's a // cycle and we shouldn't have allowed for a cycle to be chained. - if self.contains_candidate(&candidate.candidate_hash) { + if self.best_chain.contains(&candidate.candidate_hash) { return None } @@ -1214,10 +1232,7 @@ impl FragmentChain { }; // Add the candidate to the chain now. 
- self.chain.push(node); - self.candidates.insert(candidate_hash); - self.by_parent_head.insert(parent_head_data_hash, candidate_hash); - self.by_output_head.insert(output_head_data_hash, candidate_hash); + self.best_chain.push(node); } else { break } diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index 28ca198bfff9..4def739b63bf 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -348,8 +348,8 @@ async fn handle_active_leaves_update( relay_parent = ?hash, para_id = ?para, "Populated fragment chain with {} candidates: {:?}", - chain.len(), - chain.to_vec() + chain.best_chain_len(), + chain.best_chain_vec() ); gum::trace!( @@ -375,7 +375,7 @@ async fn handle_active_leaves_update( let mut unconnected = 0; for RelayBlockViewData { fragment_chains } in view.active_leaves.values() { for chain in fragment_chains.values() { - connected += chain.len(); + connected += chain.best_chain_len(); unconnected += chain.unconnected_len(); } } @@ -577,7 +577,7 @@ async fn handle_candidate_backed( relay_parent = ?leaf, para_id = ?para, "Candidate backed. 
Candidate chain for para: {:?}", - maybe_new_chain.as_ref().unwrap_or(chain).to_vec() + maybe_new_chain.as_ref().unwrap_or(chain).best_chain_vec() ); gum::trace!( @@ -656,7 +656,7 @@ fn answer_get_backable_candidates( ?relay_parent, para_id = ?para, "Candidate chain for para: {:?}", - chain.to_vec() + chain.best_chain_vec() ); gum::trace!( From fdd8e546a58b2c918b144df4e3419b21e816e03a Mon Sep 17 00:00:00 2001 From: alindima Date: Tue, 9 Jul 2024 12:17:49 +0300 Subject: [PATCH 15/56] nits --- .../network/collator-protocol/src/validator_side/collation.rs | 3 +++ polkadot/node/network/statement-distribution/src/v2/mod.rs | 4 ---- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/polkadot/node/network/collator-protocol/src/validator_side/collation.rs b/polkadot/node/network/collator-protocol/src/validator_side/collation.rs index 96ffe9f13db3..2bb8e08e3c93 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/collation.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/collation.rs @@ -148,6 +148,9 @@ pub fn fetched_collation_sanity_check( .prospective_candidate .map_or(false, |pc| pc.candidate_hash() != fetched.hash()) { + // Note: it's important that we check for this and punish a collator that advertises a false + // candidate hash, because it can be misused to prioritise a specific collation, according + // to the fork choice rule in Prospective Parachains. 
Err(SecondingError::CandidateHashMismatch) } else if maybe_parent_head_and_hash.map_or(false, |(head, hash)| head.hash() != hash) { Err(SecondingError::ParentHeadDataMismatch) diff --git a/polkadot/node/network/statement-distribution/src/v2/mod.rs b/polkadot/node/network/statement-distribution/src/v2/mod.rs index 77cd59d69fbd..e0d75b574d50 100644 --- a/polkadot/node/network/statement-distribution/src/v2/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/mod.rs @@ -2199,10 +2199,6 @@ async fn fragment_chain_update_inner( Some(h) => h, }; - if hypotheticals.is_empty() { - return - } - // 2. find out which are in the frontier gum::debug!( target: LOG_TARGET, From 297781bc0f4f5ee03ca4809619b804ce80d14ba0 Mon Sep 17 00:00:00 2001 From: alindima Date: Thu, 11 Jul 2024 09:47:21 +0300 Subject: [PATCH 16/56] start modifying tests --- .../src/fragment_chain/tests.rs | 2921 +++++++++-------- .../core/prospective-parachains/src/tests.rs | 33 - 2 files changed, 1473 insertions(+), 1481 deletions(-) diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs index 26ee94d59d8e..c7b7538e7bb7 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs @@ -17,7 +17,9 @@ use super::*; use assert_matches::assert_matches; use polkadot_node_subsystem_util::inclusion_emulator::InboundHrmpLimitations; -use polkadot_primitives::{BlockNumber, CandidateCommitments, CandidateDescriptor, HeadData}; +use polkadot_primitives::{ + BlockNumber, CandidateCommitments, CandidateDescriptor, HeadData, Id as ParaId, +}; use polkadot_primitives_test_helpers as test_helpers; fn make_constraints( @@ -85,7 +87,6 @@ fn make_committed_candidate( #[test] fn scope_rejects_ancestors_that_skip_blocks() { - let para_id = ParaId::from(5u32); let relay_parent = RelayChainBlockInfo { number: 10, hash: 
Hash::repeat_byte(10), @@ -104,7 +105,6 @@ fn scope_rejects_ancestors_that_skip_blocks() { assert_matches!( Scope::with_ancestors( - para_id, relay_parent, base_constraints, pending_availability, @@ -117,7 +117,6 @@ fn scope_rejects_ancestors_that_skip_blocks() { #[test] fn scope_rejects_ancestor_for_0_block() { - let para_id = ParaId::from(5u32); let relay_parent = RelayChainBlockInfo { number: 0, hash: Hash::repeat_byte(0), @@ -136,7 +135,6 @@ fn scope_rejects_ancestor_for_0_block() { assert_matches!( Scope::with_ancestors( - para_id, relay_parent, base_constraints, pending_availability, @@ -149,7 +147,6 @@ fn scope_rejects_ancestor_for_0_block() { #[test] fn scope_only_takes_ancestors_up_to_min() { - let para_id = ParaId::from(5u32); let relay_parent = RelayChainBlockInfo { number: 5, hash: Hash::repeat_byte(0), @@ -179,7 +176,6 @@ fn scope_only_takes_ancestors_up_to_min() { let pending_availability = Vec::new(); let scope = Scope::with_ancestors( - para_id, relay_parent, base_constraints, pending_availability, @@ -194,7 +190,6 @@ fn scope_only_takes_ancestors_up_to_min() { #[test] fn scope_rejects_unordered_ancestors() { - let para_id = ParaId::from(5u32); let relay_parent = RelayChainBlockInfo { number: 5, hash: Hash::repeat_byte(0), @@ -225,7 +220,6 @@ fn scope_rejects_unordered_ancestors() { assert_matches!( Scope::with_ancestors( - para_id, relay_parent, base_constraints, pending_availability, @@ -257,876 +251,925 @@ fn candidate_storage_methods() { let mut wrong_pvd = pvd.clone(); wrong_pvd.max_pov_size = 0; assert_matches!( - storage.add_candidate(candidate.clone(), wrong_pvd, CandidateState::Seconded), - Err(CandidateStorageInsertionError::PersistedValidationDataMismatch) + CandidateEntry::new( + candidate_hash, + candidate.clone(), + wrong_pvd.clone(), + CandidateState::Seconded + ), + Err(CandidateEntryError::PersistedValidationDataMismatch) + ); + assert_matches!( + CandidateEntry::new_seconded(candidate_hash, candidate.clone(), wrong_pvd), + 
Err(CandidateEntryError::PersistedValidationDataMismatch) ); + // Zero-length cycle. + { + let mut candidate = candidate.clone(); + candidate.commitments.head_data = HeadData(vec![1; 10]); + let mut pvd = pvd.clone(); + pvd.parent_head = HeadData(vec![1; 10]); + candidate.descriptor.persisted_validation_data_hash = pvd.hash(); + assert_matches!( + CandidateEntry::new_seconded(candidate_hash, candidate, pvd), + Err(CandidateEntryError::ZeroLengthCycle) + ); + } assert!(!storage.contains(&candidate_hash)); - assert_eq!(storage.possible_para_children(&parent_head_hash).count(), 0); - assert_eq!(storage.relay_parent_of_candidate(&candidate_hash), None); + assert_eq!(storage.mark_backed(&candidate_hash), false); + assert_eq!(storage.possible_backed_para_children(&parent_head_hash).count(), 0); assert_eq!(storage.head_data_by_hash(&candidate.descriptor.para_head), None); assert_eq!(storage.head_data_by_hash(&parent_head_hash), None); - assert_eq!(storage.is_backed(&candidate_hash), false); - // Add a valid candidate - storage - .add_candidate(candidate.clone(), pvd.clone(), CandidateState::Seconded) - .unwrap(); + // Add a valid candidate. 
+ let candidate_entry = CandidateEntry::new( + candidate_hash, + candidate.clone(), + pvd.clone(), + CandidateState::Seconded, + ) + .unwrap(); + storage.add_candidate_entry(candidate_entry.clone()).unwrap(); assert!(storage.contains(&candidate_hash)); - assert_eq!(storage.possible_para_children(&parent_head_hash).count(), 1); - assert_eq!(storage.possible_para_children(&candidate.descriptor.para_head).count(), 0); - assert_eq!(storage.relay_parent_of_candidate(&candidate_hash), Some(relay_parent)); + assert_eq!(storage.possible_backed_para_children(&parent_head_hash).count(), 0); + assert_eq!(storage.possible_backed_para_children(&candidate.descriptor.para_head).count(), 0); assert_eq!( storage.head_data_by_hash(&candidate.descriptor.para_head).unwrap(), &candidate.commitments.head_data ); assert_eq!(storage.head_data_by_hash(&parent_head_hash).unwrap(), &pvd.parent_head); - assert_eq!(storage.is_backed(&candidate_hash), false); - storage.mark_backed(&candidate_hash); - assert_eq!(storage.is_backed(&candidate_hash), true); + // Now mark it as backed + assert_eq!(storage.mark_backed(&candidate_hash), true); + // Marking it twice is fine. + assert_eq!(storage.mark_backed(&candidate_hash), true); + assert_eq!( + storage + .possible_backed_para_children(&parent_head_hash) + .map(|c| c.candidate_hash) + .collect::>(), + vec![candidate_hash] + ); + assert_eq!(storage.possible_backed_para_children(&candidate.descriptor.para_head).count(), 0); // Re-adding a candidate fails. assert_matches!( - storage.add_candidate(candidate.clone(), pvd.clone(), CandidateState::Seconded), - Err(CandidateStorageInsertionError::CandidateAlreadyKnown(hash)) if candidate_hash == hash + storage.add_candidate_entry(candidate_entry), + Err(Error::CandidateAlreadyKnown) ); // Remove candidate and re-add it later in backed state. 
storage.remove_candidate(&candidate_hash); assert!(!storage.contains(&candidate_hash)); - assert_eq!(storage.possible_para_children(&parent_head_hash).count(), 0); - assert_eq!(storage.relay_parent_of_candidate(&candidate_hash), None); - assert_eq!(storage.head_data_by_hash(&candidate.descriptor.para_head), None); - assert_eq!(storage.head_data_by_hash(&parent_head_hash), None); - assert_eq!(storage.is_backed(&candidate_hash), false); - storage - .add_candidate(candidate.clone(), pvd.clone(), CandidateState::Backed) - .unwrap(); - assert_eq!(storage.is_backed(&candidate_hash), true); - - // Test retain - storage.retain(|_| true); - assert!(storage.contains(&candidate_hash)); - storage.retain(|_| false); + // Removing it twice is fine. + storage.remove_candidate(&candidate_hash); assert!(!storage.contains(&candidate_hash)); - assert_eq!(storage.possible_para_children(&parent_head_hash).count(), 0); - assert_eq!(storage.relay_parent_of_candidate(&candidate_hash), None); + assert_eq!(storage.possible_backed_para_children(&parent_head_hash).count(), 0); assert_eq!(storage.head_data_by_hash(&candidate.descriptor.para_head), None); assert_eq!(storage.head_data_by_hash(&parent_head_hash), None); - assert_eq!(storage.is_backed(&candidate_hash), false); -} - -#[test] -fn populate_and_extend_from_storage_empty() { - // Empty chain and empty storage. 
- let storage = CandidateStorage::default(); - let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); - let pending_availability = Vec::new(); - - let scope = Scope::with_ancestors( - ParaId::from(2), - RelayChainBlockInfo { - number: 1, - hash: Hash::repeat_byte(1), - storage_root: Hash::repeat_byte(2), - }, - base_constraints, - pending_availability, - 4, - vec![], - ) - .unwrap(); - let mut chain = FragmentChain::populate(scope, &storage); - assert!(chain.to_vec().is_empty()); - chain.extend_from_storage(&storage); - assert!(chain.to_vec().is_empty()); -} - -#[test] -fn populate_and_extend_from_storage_with_existing_empty_to_vec() { - let mut storage = CandidateStorage::default(); - - let para_id = ParaId::from(5u32); - let relay_parent_a = Hash::repeat_byte(1); - let relay_parent_b = Hash::repeat_byte(2); - let relay_parent_c = Hash::repeat_byte(3); - - let (pvd_a, candidate_a) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0a].into(), - vec![0x0b].into(), - 0, - ); - let candidate_a_hash = candidate_a.hash(); - - let (pvd_b, candidate_b) = make_committed_candidate( - para_id, - relay_parent_b, - 1, - vec![0x0b].into(), - vec![0x0c].into(), - 1, - ); - let candidate_b_hash = candidate_b.hash(); - - let (pvd_c, candidate_c) = make_committed_candidate( - para_id, - relay_parent_c, - 2, - vec![0x0c].into(), - vec![0x0d].into(), - 2, - ); - let candidate_c_hash = candidate_c.hash(); - - let relay_parent_a_info = RelayChainBlockInfo { - number: pvd_a.relay_parent_number, - hash: relay_parent_a, - storage_root: pvd_a.relay_parent_storage_root, - }; - let relay_parent_b_info = RelayChainBlockInfo { - number: pvd_b.relay_parent_number, - hash: relay_parent_b, - storage_root: pvd_b.relay_parent_storage_root, - }; - let relay_parent_c_info = RelayChainBlockInfo { - number: pvd_c.relay_parent_number, - hash: relay_parent_c, - storage_root: pvd_c.relay_parent_storage_root, - }; - - let base_constraints = make_constraints(0, vec![0], 
vec![0x0a].into()); - let pending_availability = Vec::new(); - - let ancestors = vec![ - // These need to be ordered in reverse. - relay_parent_b_info.clone(), - relay_parent_a_info.clone(), - ]; - - storage - .add_candidate(candidate_a.clone(), pvd_a.clone(), CandidateState::Seconded) - .unwrap(); - storage - .add_candidate(candidate_b.clone(), pvd_b.clone(), CandidateState::Backed) - .unwrap(); storage - .add_candidate(candidate_c.clone(), pvd_c.clone(), CandidateState::Backed) - .unwrap(); - - // Candidate A doesn't adhere to the base constraints. - { - for wrong_constraints in [ - // Different required parent - make_constraints(0, vec![0], vec![0x0e].into()), - // Min relay parent number is wrong - make_constraints(1, vec![0], vec![0x0a].into()), - ] { - let scope = Scope::with_ancestors( - para_id, - relay_parent_c_info.clone(), - wrong_constraints.clone(), - pending_availability.clone(), - 4, - ancestors.clone(), - ) - .unwrap(); - let mut chain = FragmentChain::populate(scope, &storage); - - assert!(chain.to_vec().is_empty()); - - chain.extend_from_storage(&storage); - assert!(chain.to_vec().is_empty()); - - // If the min relay parent number is wrong, candidate A can never become valid. - // Otherwise, if only the required parent doesn't match, candidate A is still a - // potential candidate. - if wrong_constraints.min_relay_parent_number == 1 { - assert_eq!( - chain.can_add_candidate_as_potential( - &storage, - &candidate_a.hash(), - &candidate_a.descriptor.relay_parent, - pvd_a.parent_head.hash(), - Some(candidate_a.commitments.head_data.hash()), - ), - PotentialAddition::None - ); - } else { - assert_eq!( - chain.can_add_candidate_as_potential( - &storage, - &candidate_a.hash(), - &candidate_a.descriptor.relay_parent, - pvd_a.parent_head.hash(), - Some(candidate_a.commitments.head_data.hash()), - ), - PotentialAddition::Anyhow - ); - } - - // All other candidates can always be potential candidates. 
- for (candidate, pvd) in - [(candidate_b.clone(), pvd_b.clone()), (candidate_c.clone(), pvd_c.clone())] - { - assert_eq!( - chain.can_add_candidate_as_potential( - &storage, - &candidate.hash(), - &candidate.descriptor.relay_parent, - pvd.parent_head.hash(), - Some(candidate.commitments.head_data.hash()), - ), - PotentialAddition::Anyhow - ); - } - } - } - - // Various max depths. - { - // depth is 0, will only allow 1 candidate - let scope = Scope::with_ancestors( - para_id, - relay_parent_c_info.clone(), - base_constraints.clone(), - pending_availability.clone(), - 0, - ancestors.clone(), - ) + .add_pending_availability_candidate(candidate_hash, candidate.clone(), pvd) .unwrap(); - // Before populating the chain, all candidates are potential candidates. However, they can - // only be added as connected candidates, because only one candidates is allowed by max - // depth - let chain = FragmentChain::populate(scope.clone(), &CandidateStorage::default()); - for (candidate, pvd) in [ - (candidate_a.clone(), pvd_a.clone()), - (candidate_b.clone(), pvd_b.clone()), - (candidate_c.clone(), pvd_c.clone()), - ] { - assert_eq!( - chain.can_add_candidate_as_potential( - &CandidateStorage::default(), - &candidate.hash(), - &candidate.descriptor.relay_parent, - pvd.parent_head.hash(), - Some(candidate.commitments.head_data.hash()), - ), - PotentialAddition::IfConnected - ); - } - let mut chain = FragmentChain::populate(scope, &storage); - assert_eq!(chain.to_vec(), vec![candidate_a_hash]); - chain.extend_from_storage(&storage); - assert_eq!(chain.to_vec(), vec![candidate_a_hash]); - // since depth is maxed out, we can't add more potential candidates - // candidate A is no longer a potential candidate because it's already present. 
- for (candidate, pvd) in [ - (candidate_a.clone(), pvd_a.clone()), - (candidate_b.clone(), pvd_b.clone()), - (candidate_c.clone(), pvd_c.clone()), - ] { - assert_eq!( - chain.can_add_candidate_as_potential( - &storage, - &candidate.hash(), - &candidate.descriptor.relay_parent, - pvd.parent_head.hash(), - Some(candidate.commitments.head_data.hash()), - ), - PotentialAddition::None - ); - } - - // depth is 1, allows two candidates - let scope = Scope::with_ancestors( - para_id, - relay_parent_c_info.clone(), - base_constraints.clone(), - pending_availability.clone(), - 1, - ancestors.clone(), - ) - .unwrap(); - // Before populating the chain, all candidates can be added as potential. - let mut modified_storage = CandidateStorage::default(); - let chain = FragmentChain::populate(scope.clone(), &modified_storage); - for (candidate, pvd) in [ - (candidate_a.clone(), pvd_a.clone()), - (candidate_b.clone(), pvd_b.clone()), - (candidate_c.clone(), pvd_c.clone()), - ] { - assert_eq!( - chain.can_add_candidate_as_potential( - &modified_storage, - &candidate.hash(), - &candidate.descriptor.relay_parent, - pvd.parent_head.hash(), - Some(candidate.commitments.head_data.hash()), - ), - PotentialAddition::Anyhow - ); - } - // Add an unconnected candidate. We now should only allow a Connected candidate, because max - // depth only allows one more candidate. - modified_storage - .add_candidate(candidate_b.clone(), pvd_b.clone(), CandidateState::Seconded) - .unwrap(); - let chain = FragmentChain::populate(scope.clone(), &modified_storage); - for (candidate, pvd) in - [(candidate_a.clone(), pvd_a.clone()), (candidate_c.clone(), pvd_c.clone())] - { - assert_eq!( - chain.can_add_candidate_as_potential( - &modified_storage, - &candidate.hash(), - &candidate.descriptor.relay_parent, - pvd.parent_head.hash(), - Some(candidate.commitments.head_data.hash()), - ), - PotentialAddition::IfConnected - ); - } - - // Now try populating from all candidates. 
- let mut chain = FragmentChain::populate(scope, &storage); - assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); - chain.extend_from_storage(&storage); - assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); - // since depth is maxed out, we can't add more potential candidates - // candidate A and B are no longer a potential candidate because they're already present. - for (candidate, pvd) in [ - (candidate_a.clone(), pvd_a.clone()), - (candidate_b.clone(), pvd_b.clone()), - (candidate_c.clone(), pvd_c.clone()), - ] { - assert_eq!( - chain.can_add_candidate_as_potential( - &storage, - &candidate.hash(), - &candidate.descriptor.relay_parent, - pvd.parent_head.hash(), - Some(candidate.commitments.head_data.hash()), - ), - PotentialAddition::None - ); - } - - // depths larger than 2, allows all candidates - for depth in 2..6 { - let scope = Scope::with_ancestors( - para_id, - relay_parent_c_info.clone(), - base_constraints.clone(), - pending_availability.clone(), - depth, - ancestors.clone(), - ) - .unwrap(); - let mut chain = FragmentChain::populate(scope, &storage); - assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); - chain.extend_from_storage(&storage); - assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); - // Candidates are no longer potential candidates because they're already part of the - // chain. - for (candidate, pvd) in [ - (candidate_a.clone(), pvd_a.clone()), - (candidate_b.clone(), pvd_b.clone()), - (candidate_c.clone(), pvd_c.clone()), - ] { - assert_eq!( - chain.can_add_candidate_as_potential( - &storage, - &candidate.hash(), - &candidate.descriptor.relay_parent, - pvd.parent_head.hash(), - Some(candidate.commitments.head_data.hash()), - ), - PotentialAddition::None - ); - } - } - } - - // Wrong relay parents - { - // Candidates A has relay parent out of scope. 
- let ancestors_without_a = vec![relay_parent_b_info.clone()]; - let scope = Scope::with_ancestors( - para_id, - relay_parent_c_info.clone(), - base_constraints.clone(), - pending_availability.clone(), - 4, - ancestors_without_a, - ) - .unwrap(); - - let mut chain = FragmentChain::populate(scope, &storage); - assert!(chain.to_vec().is_empty()); - - chain.extend_from_storage(&storage); - assert!(chain.to_vec().is_empty()); - - // Candidate A is not a potential candidate, but candidates B and C still are. - assert_eq!( - chain.can_add_candidate_as_potential( - &storage, - &candidate_a.hash(), - &candidate_a.descriptor.relay_parent, - pvd_a.parent_head.hash(), - Some(candidate_a.commitments.head_data.hash()), - ), - PotentialAddition::None - ); - for (candidate, pvd) in - [(candidate_b.clone(), pvd_b.clone()), (candidate_c.clone(), pvd_c.clone())] - { - assert_eq!( - chain.can_add_candidate_as_potential( - &storage, - &candidate.hash(), - &candidate.descriptor.relay_parent, - pvd.parent_head.hash(), - Some(candidate.commitments.head_data.hash()), - ), - PotentialAddition::Anyhow - ); - } - - // Candidate C has the same relay parent as candidate A's parent. 
Relay parent not allowed - // to move backwards - let mut modified_storage = storage.clone(); - modified_storage.remove_candidate(&candidate_c_hash); - let (wrong_pvd_c, wrong_candidate_c) = make_committed_candidate( - para_id, - relay_parent_a, - 1, - vec![0x0c].into(), - vec![0x0d].into(), - 2, - ); - modified_storage - .add_candidate(wrong_candidate_c.clone(), wrong_pvd_c.clone(), CandidateState::Seconded) - .unwrap(); - let scope = Scope::with_ancestors( - para_id, - relay_parent_c_info.clone(), - base_constraints.clone(), - pending_availability.clone(), - 4, - ancestors.clone(), - ) - .unwrap(); - let mut chain = FragmentChain::populate(scope, &modified_storage); - assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); - chain.extend_from_storage(&modified_storage); - assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); - - // Candidate C is not even a potential candidate. - assert_eq!( - chain.can_add_candidate_as_potential( - &modified_storage, - &wrong_candidate_c.hash(), - &wrong_candidate_c.descriptor.relay_parent, - wrong_pvd_c.parent_head.hash(), - Some(wrong_candidate_c.commitments.head_data.hash()), - ), - PotentialAddition::None - ); - } - - // Parachain fork and cycles are not allowed. - { - // Candidate C has the same parent as candidate B. - let mut modified_storage = storage.clone(); - modified_storage.remove_candidate(&candidate_c_hash); - let (wrong_pvd_c, wrong_candidate_c) = make_committed_candidate( - para_id, - relay_parent_c, - 2, - vec![0x0b].into(), - vec![0x0d].into(), - 2, - ); - modified_storage - .add_candidate(wrong_candidate_c.clone(), wrong_pvd_c.clone(), CandidateState::Seconded) - .unwrap(); - let scope = Scope::with_ancestors( - para_id, - relay_parent_c_info.clone(), - base_constraints.clone(), - pending_availability.clone(), - 4, - ancestors.clone(), - ) - .unwrap(); - let mut chain = FragmentChain::populate(scope, &modified_storage); - // We'll either have A->B or A->C. 
It's not deterministic because CandidateStorage uses - // HashSets and HashMaps. - if chain.to_vec() == vec![candidate_a_hash, candidate_b_hash] { - chain.extend_from_storage(&modified_storage); - assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); - // Candidate C is not even a potential candidate. - assert_eq!( - chain.can_add_candidate_as_potential( - &modified_storage, - &wrong_candidate_c.hash(), - &wrong_candidate_c.descriptor.relay_parent, - wrong_pvd_c.parent_head.hash(), - Some(wrong_candidate_c.commitments.head_data.hash()), - ), - PotentialAddition::None - ); - } else if chain.to_vec() == vec![candidate_a_hash, wrong_candidate_c.hash()] { - chain.extend_from_storage(&modified_storage); - assert_eq!(chain.to_vec(), vec![candidate_a_hash, wrong_candidate_c.hash()]); - // Candidate B is not even a potential candidate. - assert_eq!( - chain.can_add_candidate_as_potential( - &modified_storage, - &candidate_b.hash(), - &candidate_b.descriptor.relay_parent, - pvd_b.parent_head.hash(), - Some(candidate_b.commitments.head_data.hash()), - ), - PotentialAddition::None - ); - } else { - panic!("Unexpected chain: {:?}", chain.to_vec()); - } - - // Candidate C is a 0-length cycle. - // Candidate C has the same parent as candidate B. 
- let mut modified_storage = storage.clone(); - modified_storage.remove_candidate(&candidate_c_hash); - let (wrong_pvd_c, wrong_candidate_c) = make_committed_candidate( - para_id, - relay_parent_c, - 2, - vec![0x0c].into(), - vec![0x0c].into(), - 2, - ); - modified_storage - .add_candidate(wrong_candidate_c.clone(), wrong_pvd_c.clone(), CandidateState::Seconded) - .unwrap(); - let scope = Scope::with_ancestors( - para_id, - relay_parent_c_info.clone(), - base_constraints.clone(), - pending_availability.clone(), - 4, - ancestors.clone(), - ) - .unwrap(); - let mut chain = FragmentChain::populate(scope, &modified_storage); - assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); - chain.extend_from_storage(&modified_storage); - assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); - // Candidate C is not even a potential candidate. - assert_eq!( - chain.can_add_candidate_as_potential( - &modified_storage, - &wrong_candidate_c.hash(), - &wrong_candidate_c.descriptor.relay_parent, - wrong_pvd_c.parent_head.hash(), - Some(wrong_candidate_c.commitments.head_data.hash()), - ), - PotentialAddition::None - ); - - // Candidate C points back to the pre-state of candidate C. 
- let mut modified_storage = storage.clone(); - modified_storage.remove_candidate(&candidate_c_hash); - let (wrong_pvd_c, wrong_candidate_c) = make_committed_candidate( - para_id, - relay_parent_c, - 2, - vec![0x0c].into(), - vec![0x0b].into(), - 2, - ); - modified_storage - .add_candidate(wrong_candidate_c.clone(), wrong_pvd_c.clone(), CandidateState::Seconded) - .unwrap(); - let scope = Scope::with_ancestors( - para_id, - relay_parent_c_info.clone(), - base_constraints.clone(), - pending_availability.clone(), - 4, - ancestors.clone(), - ) - .unwrap(); - let mut chain = FragmentChain::populate(scope, &modified_storage); - assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); - chain.extend_from_storage(&modified_storage); - assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); - // Candidate C is not even a potential candidate. - assert_eq!( - chain.can_add_candidate_as_potential( - &modified_storage, - &wrong_candidate_c.hash(), - &wrong_candidate_c.descriptor.relay_parent, - wrong_pvd_c.parent_head.hash(), - Some(wrong_candidate_c.commitments.head_data.hash()), - ), - PotentialAddition::None - ); - } - - // Test with candidates pending availability - { - // Valid options - for pending in [ - vec![PendingAvailability { - candidate_hash: candidate_a_hash, - relay_parent: relay_parent_a_info.clone(), - }], - vec![ - PendingAvailability { - candidate_hash: candidate_a_hash, - relay_parent: relay_parent_a_info.clone(), - }, - PendingAvailability { - candidate_hash: candidate_b_hash, - relay_parent: relay_parent_b_info.clone(), - }, - ], - vec![ - PendingAvailability { - candidate_hash: candidate_a_hash, - relay_parent: relay_parent_a_info.clone(), - }, - PendingAvailability { - candidate_hash: candidate_b_hash, - relay_parent: relay_parent_b_info.clone(), - }, - PendingAvailability { - candidate_hash: candidate_c_hash, - relay_parent: relay_parent_c_info.clone(), - }, - ], - ] { - let scope = Scope::with_ancestors( - para_id, - 
relay_parent_c_info.clone(), - base_constraints.clone(), - pending, - 3, - ancestors.clone(), - ) - .unwrap(); - let mut chain = FragmentChain::populate(scope, &storage); - assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); - chain.extend_from_storage(&storage); - assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); - } - - // Relay parents of pending availability candidates can be out of scope - // Relay parent of candidate A is out of scope. - let ancestors_without_a = vec![relay_parent_b_info.clone()]; - let scope = Scope::with_ancestors( - para_id, - relay_parent_c_info.clone(), - base_constraints.clone(), - vec![PendingAvailability { - candidate_hash: candidate_a_hash, - relay_parent: relay_parent_a_info.clone(), - }], - 4, - ancestors_without_a, - ) - .unwrap(); - let mut chain = FragmentChain::populate(scope, &storage); - assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); - chain.extend_from_storage(&storage); - assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); - - // Even relay parents of pending availability candidates which are out of scope cannot move - // backwards. 
- let scope = Scope::with_ancestors( - para_id, - relay_parent_c_info.clone(), - base_constraints.clone(), - vec![ - PendingAvailability { - candidate_hash: candidate_a_hash, - relay_parent: RelayChainBlockInfo { - hash: relay_parent_a_info.hash, - number: 1, - storage_root: relay_parent_a_info.storage_root, - }, - }, - PendingAvailability { - candidate_hash: candidate_b_hash, - relay_parent: RelayChainBlockInfo { - hash: relay_parent_b_info.hash, - number: 0, - storage_root: relay_parent_b_info.storage_root, - }, - }, - ], - 4, - vec![], - ) - .unwrap(); - let mut chain = FragmentChain::populate(scope, &storage); - assert!(chain.to_vec().is_empty()); - - chain.extend_from_storage(&storage); - assert!(chain.to_vec().is_empty()); - } -} - -#[test] -fn extend_from_storage_with_existing_to_vec() { - let para_id = ParaId::from(5u32); - let relay_parent_a = Hash::repeat_byte(1); - let relay_parent_b = Hash::repeat_byte(2); - let relay_parent_d = Hash::repeat_byte(3); - - let (pvd_a, candidate_a) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0a].into(), - vec![0x0b].into(), - 0, - ); - let candidate_a_hash = candidate_a.hash(); - - let (pvd_b, candidate_b) = make_committed_candidate( - para_id, - relay_parent_b, - 1, - vec![0x0b].into(), - vec![0x0c].into(), - 1, - ); - let candidate_b_hash = candidate_b.hash(); - - let (pvd_c, candidate_c) = make_committed_candidate( - para_id, - // Use the same relay parent number as B to test that it doesn't need to change between - // candidates. - relay_parent_b, - 1, - vec![0x0c].into(), - vec![0x0d].into(), - 1, - ); - let candidate_c_hash = candidate_c.hash(); - - // Candidate D will never be added to the chain. 
- let (pvd_d, candidate_d) = make_committed_candidate( - para_id, - relay_parent_d, - 2, - vec![0x0e].into(), - vec![0x0f].into(), - 1, - ); - - let relay_parent_a_info = RelayChainBlockInfo { - number: pvd_a.relay_parent_number, - hash: relay_parent_a, - storage_root: pvd_a.relay_parent_storage_root, - }; - let relay_parent_b_info = RelayChainBlockInfo { - number: pvd_b.relay_parent_number, - hash: relay_parent_b, - storage_root: pvd_b.relay_parent_storage_root, - }; - let relay_parent_d_info = RelayChainBlockInfo { - number: pvd_d.relay_parent_number, - hash: relay_parent_d, - storage_root: pvd_d.relay_parent_storage_root, - }; - - let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); - let pending_availability = Vec::new(); - - let ancestors = vec![ - // These need to be ordered in reverse. - relay_parent_b_info.clone(), - relay_parent_a_info.clone(), - ]; + assert!(storage.contains(&candidate_hash)); - // Already had A and C in the storage. Introduce B, which should add both B and C to the chain - // now. 
- { - let mut storage = CandidateStorage::default(); - storage - .add_candidate(candidate_a.clone(), pvd_a.clone(), CandidateState::Seconded) - .unwrap(); - storage - .add_candidate(candidate_c.clone(), pvd_c.clone(), CandidateState::Seconded) - .unwrap(); + assert_eq!( storage - .add_candidate(candidate_d.clone(), pvd_d.clone(), CandidateState::Seconded) - .unwrap(); - - let scope = Scope::with_ancestors( - para_id, - relay_parent_d_info.clone(), - base_constraints.clone(), - pending_availability.clone(), - 4, - ancestors.clone(), - ) - .unwrap(); - let mut chain = FragmentChain::populate(scope, &storage); - assert_eq!(chain.to_vec(), vec![candidate_a_hash]); + .possible_backed_para_children(&parent_head_hash) + .map(|c| c.candidate_hash) + .collect::>(), + vec![candidate_hash] + ); + assert_eq!(storage.possible_backed_para_children(&candidate.descriptor.para_head).count(), 0); - storage - .add_candidate(candidate_b.clone(), pvd_b.clone(), CandidateState::Seconded) - .unwrap(); - chain.extend_from_storage(&storage); - assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); - } + // Now add a second candidate in Seconded state. This will be a fork. + let (pvd_2, candidate_2) = make_committed_candidate( + ParaId::from(5u32), + relay_parent, + 8, + vec![4, 5, 6].into(), + vec![2, 3, 4].into(), + 7, + ); + let candidate_hash_2 = candidate_2.hash(); + let candidate_entry_2 = + CandidateEntry::new_seconded(candidate_hash_2, candidate_2, pvd_2).unwrap(); - // Already had A and B in the chain. Introduce C. 
- { - let mut storage = CandidateStorage::default(); - storage - .add_candidate(candidate_a.clone(), pvd_a.clone(), CandidateState::Seconded) - .unwrap(); - storage - .add_candidate(candidate_b.clone(), pvd_b.clone(), CandidateState::Seconded) - .unwrap(); + storage.add_candidate_entry(candidate_entry_2).unwrap(); + assert_eq!( storage - .add_candidate(candidate_d.clone(), pvd_d.clone(), CandidateState::Seconded) - .unwrap(); - - let scope = Scope::with_ancestors( - para_id, - relay_parent_d_info.clone(), - base_constraints.clone(), - pending_availability.clone(), - 4, - ancestors.clone(), - ) - .unwrap(); - let mut chain = FragmentChain::populate(scope, &storage); - assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); + .possible_backed_para_children(&parent_head_hash) + .map(|c| c.candidate_hash) + .collect::>(), + vec![candidate_hash] + ); + // Now mark it as backed. + storage.mark_backed(&candidate_hash_2); + assert_eq!( storage - .add_candidate(candidate_c.clone(), pvd_c.clone(), CandidateState::Seconded) - .unwrap(); - chain.extend_from_storage(&storage); - assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); - } + .possible_backed_para_children(&parent_head_hash) + .map(|c| c.candidate_hash) + .collect::>(), + [candidate_hash, candidate_hash_2].into_iter().collect() + ); } +// #[test] +// fn populate_and_extend_from_storage_empty() { +// // Empty chain and empty storage. 
+// let storage = CandidateStorage::default(); +// let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); +// let pending_availability = Vec::new(); + +// let scope = Scope::with_ancestors( +// RelayChainBlockInfo { +// number: 1, +// hash: Hash::repeat_byte(1), +// storage_root: Hash::repeat_byte(2), +// }, +// base_constraints, +// pending_availability, +// 4, +// vec![], +// ) +// .unwrap(); +// let mut chain = FragmentChain::populate(scope, &storage); +// assert!(chain.to_vec().is_empty()); + +// chain.extend_from_storage(&storage); +// assert!(chain.to_vec().is_empty()); +// } + +// #[test] +// fn populate_and_extend_from_storage_with_existing_empty_to_vec() { +// let mut storage = CandidateStorage::default(); + +// let para_id = ParaId::from(5u32); +// let relay_parent_a = Hash::repeat_byte(1); +// let relay_parent_b = Hash::repeat_byte(2); +// let relay_parent_c = Hash::repeat_byte(3); + +// let (pvd_a, candidate_a) = make_committed_candidate( +// para_id, +// relay_parent_a, +// 0, +// vec![0x0a].into(), +// vec![0x0b].into(), +// 0, +// ); +// let candidate_a_hash = candidate_a.hash(); + +// let (pvd_b, candidate_b) = make_committed_candidate( +// para_id, +// relay_parent_b, +// 1, +// vec![0x0b].into(), +// vec![0x0c].into(), +// 1, +// ); +// let candidate_b_hash = candidate_b.hash(); + +// let (pvd_c, candidate_c) = make_committed_candidate( +// para_id, +// relay_parent_c, +// 2, +// vec![0x0c].into(), +// vec![0x0d].into(), +// 2, +// ); +// let candidate_c_hash = candidate_c.hash(); + +// let relay_parent_a_info = RelayChainBlockInfo { +// number: pvd_a.relay_parent_number, +// hash: relay_parent_a, +// storage_root: pvd_a.relay_parent_storage_root, +// }; +// let relay_parent_b_info = RelayChainBlockInfo { +// number: pvd_b.relay_parent_number, +// hash: relay_parent_b, +// storage_root: pvd_b.relay_parent_storage_root, +// }; +// let relay_parent_c_info = RelayChainBlockInfo { +// number: pvd_c.relay_parent_number, +// hash: 
relay_parent_c, +// storage_root: pvd_c.relay_parent_storage_root, +// }; + +// let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); +// let pending_availability = Vec::new(); + +// let ancestors = vec![ +// // These need to be ordered in reverse. +// relay_parent_b_info.clone(), +// relay_parent_a_info.clone(), +// ]; + +// storage +// .add_candidate(candidate_a.clone(), pvd_a.clone(), CandidateState::Seconded) +// .unwrap(); +// storage +// .add_candidate(candidate_b.clone(), pvd_b.clone(), CandidateState::Backed) +// .unwrap(); +// storage +// .add_candidate(candidate_c.clone(), pvd_c.clone(), CandidateState::Backed) +// .unwrap(); + +// // Candidate A doesn't adhere to the base constraints. +// { +// for wrong_constraints in [ +// // Different required parent +// make_constraints(0, vec![0], vec![0x0e].into()), +// // Min relay parent number is wrong +// make_constraints(1, vec![0], vec![0x0a].into()), +// ] { +// let scope = Scope::with_ancestors( +// relay_parent_c_info.clone(), +// wrong_constraints.clone(), +// pending_availability.clone(), +// 4, +// ancestors.clone(), +// ) +// .unwrap(); +// let mut chain = FragmentChain::populate(scope, &storage); + +// assert!(chain.to_vec().is_empty()); + +// chain.extend_from_storage(&storage); +// assert!(chain.to_vec().is_empty()); + +// // If the min relay parent number is wrong, candidate A can never become valid. +// // Otherwise, if only the required parent doesn't match, candidate A is still a +// // potential candidate. 
+// if wrong_constraints.min_relay_parent_number == 1 { +// assert_eq!( +// chain.can_add_candidate_as_potential( +// &storage, +// &candidate_a.hash(), +// &candidate_a.descriptor.relay_parent, +// pvd_a.parent_head.hash(), +// Some(candidate_a.commitments.head_data.hash()), +// ), +// PotentialAddition::None +// ); +// } else { +// assert_eq!( +// chain.can_add_candidate_as_potential( +// &storage, +// &candidate_a.hash(), +// &candidate_a.descriptor.relay_parent, +// pvd_a.parent_head.hash(), +// Some(candidate_a.commitments.head_data.hash()), +// ), +// PotentialAddition::Anyhow +// ); +// } + +// // All other candidates can always be potential candidates. +// for (candidate, pvd) in +// [(candidate_b.clone(), pvd_b.clone()), (candidate_c.clone(), pvd_c.clone())] +// { +// assert_eq!( +// chain.can_add_candidate_as_potential( +// &storage, +// &candidate.hash(), +// &candidate.descriptor.relay_parent, +// pvd.parent_head.hash(), +// Some(candidate.commitments.head_data.hash()), +// ), +// PotentialAddition::Anyhow +// ); +// } +// } +// } + +// // Various max depths. +// { +// // depth is 0, will only allow 1 candidate +// let scope = Scope::with_ancestors( +// relay_parent_c_info.clone(), +// base_constraints.clone(), +// pending_availability.clone(), +// 0, +// ancestors.clone(), +// ) +// .unwrap(); +// // Before populating the chain, all candidates are potential candidates. 
However, they can +// // only be added as connected candidates, because only one candidates is allowed by max +// // depth +// let chain = FragmentChain::populate(scope.clone(), &CandidateStorage::default()); +// for (candidate, pvd) in [ +// (candidate_a.clone(), pvd_a.clone()), +// (candidate_b.clone(), pvd_b.clone()), +// (candidate_c.clone(), pvd_c.clone()), +// ] { +// assert_eq!( +// chain.can_add_candidate_as_potential( +// &CandidateStorage::default(), +// &candidate.hash(), +// &candidate.descriptor.relay_parent, +// pvd.parent_head.hash(), +// Some(candidate.commitments.head_data.hash()), +// ), +// PotentialAddition::IfConnected +// ); +// } +// let mut chain = FragmentChain::populate(scope, &storage); +// assert_eq!(chain.to_vec(), vec![candidate_a_hash]); +// chain.extend_from_storage(&storage); +// assert_eq!(chain.to_vec(), vec![candidate_a_hash]); +// // since depth is maxed out, we can't add more potential candidates +// // candidate A is no longer a potential candidate because it's already present. +// for (candidate, pvd) in [ +// (candidate_a.clone(), pvd_a.clone()), +// (candidate_b.clone(), pvd_b.clone()), +// (candidate_c.clone(), pvd_c.clone()), +// ] { +// assert_eq!( +// chain.can_add_candidate_as_potential( +// &storage, +// &candidate.hash(), +// &candidate.descriptor.relay_parent, +// pvd.parent_head.hash(), +// Some(candidate.commitments.head_data.hash()), +// ), +// PotentialAddition::None +// ); +// } + +// // depth is 1, allows two candidates +// let scope = Scope::with_ancestors( +// relay_parent_c_info.clone(), +// base_constraints.clone(), +// pending_availability.clone(), +// 1, +// ancestors.clone(), +// ) +// .unwrap(); +// // Before populating the chain, all candidates can be added as potential. 
+// let mut modified_storage = CandidateStorage::default(); +// let chain = FragmentChain::populate(scope.clone(), &modified_storage); +// for (candidate, pvd) in [ +// (candidate_a.clone(), pvd_a.clone()), +// (candidate_b.clone(), pvd_b.clone()), +// (candidate_c.clone(), pvd_c.clone()), +// ] { +// assert_eq!( +// chain.can_add_candidate_as_potential( +// &modified_storage, +// &candidate.hash(), +// &candidate.descriptor.relay_parent, +// pvd.parent_head.hash(), +// Some(candidate.commitments.head_data.hash()), +// ), +// PotentialAddition::Anyhow +// ); +// } +// // Add an unconnected candidate. We now should only allow a Connected candidate, because max +// // depth only allows one more candidate. +// modified_storage +// .add_candidate(candidate_b.clone(), pvd_b.clone(), CandidateState::Seconded) +// .unwrap(); +// let chain = FragmentChain::populate(scope.clone(), &modified_storage); +// for (candidate, pvd) in +// [(candidate_a.clone(), pvd_a.clone()), (candidate_c.clone(), pvd_c.clone())] +// { +// assert_eq!( +// chain.can_add_candidate_as_potential( +// &modified_storage, +// &candidate.hash(), +// &candidate.descriptor.relay_parent, +// pvd.parent_head.hash(), +// Some(candidate.commitments.head_data.hash()), +// ), +// PotentialAddition::IfConnected +// ); +// } + +// // Now try populating from all candidates. +// let mut chain = FragmentChain::populate(scope, &storage); +// assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); +// chain.extend_from_storage(&storage); +// assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); +// // since depth is maxed out, we can't add more potential candidates +// // candidate A and B are no longer a potential candidate because they're already present. 
+// for (candidate, pvd) in [ +// (candidate_a.clone(), pvd_a.clone()), +// (candidate_b.clone(), pvd_b.clone()), +// (candidate_c.clone(), pvd_c.clone()), +// ] { +// assert_eq!( +// chain.can_add_candidate_as_potential( +// &storage, +// &candidate.hash(), +// &candidate.descriptor.relay_parent, +// pvd.parent_head.hash(), +// Some(candidate.commitments.head_data.hash()), +// ), +// PotentialAddition::None +// ); +// } + +// // depths larger than 2, allows all candidates +// for depth in 2..6 { +// let scope = Scope::with_ancestors( +// relay_parent_c_info.clone(), +// base_constraints.clone(), +// pending_availability.clone(), +// depth, +// ancestors.clone(), +// ) +// .unwrap(); +// let mut chain = FragmentChain::populate(scope, &storage); +// assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); +// chain.extend_from_storage(&storage); +// assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); +// // Candidates are no longer potential candidates because they're already part of the +// // chain. +// for (candidate, pvd) in [ +// (candidate_a.clone(), pvd_a.clone()), +// (candidate_b.clone(), pvd_b.clone()), +// (candidate_c.clone(), pvd_c.clone()), +// ] { +// assert_eq!( +// chain.can_add_candidate_as_potential( +// &storage, +// &candidate.hash(), +// &candidate.descriptor.relay_parent, +// pvd.parent_head.hash(), +// Some(candidate.commitments.head_data.hash()), +// ), +// PotentialAddition::None +// ); +// } +// } +// } + +// // Wrong relay parents +// { +// // Candidates A has relay parent out of scope. 
+// let ancestors_without_a = vec![relay_parent_b_info.clone()]; +// let scope = Scope::with_ancestors( +// relay_parent_c_info.clone(), +// base_constraints.clone(), +// pending_availability.clone(), +// 4, +// ancestors_without_a, +// ) +// .unwrap(); + +// let mut chain = FragmentChain::populate(scope, &storage); +// assert!(chain.to_vec().is_empty()); + +// chain.extend_from_storage(&storage); +// assert!(chain.to_vec().is_empty()); + +// // Candidate A is not a potential candidate, but candidates B and C still are. +// assert_eq!( +// chain.can_add_candidate_as_potential( +// &storage, +// &candidate_a.hash(), +// &candidate_a.descriptor.relay_parent, +// pvd_a.parent_head.hash(), +// Some(candidate_a.commitments.head_data.hash()), +// ), +// PotentialAddition::None +// ); +// for (candidate, pvd) in +// [(candidate_b.clone(), pvd_b.clone()), (candidate_c.clone(), pvd_c.clone())] +// { +// assert_eq!( +// chain.can_add_candidate_as_potential( +// &storage, +// &candidate.hash(), +// &candidate.descriptor.relay_parent, +// pvd.parent_head.hash(), +// Some(candidate.commitments.head_data.hash()), +// ), +// PotentialAddition::Anyhow +// ); +// } + +// // Candidate C has the same relay parent as candidate A's parent. 
Relay parent not allowed +// // to move backwards +// let mut modified_storage = storage.clone(); +// modified_storage.remove_candidate(&candidate_c_hash); +// let (wrong_pvd_c, wrong_candidate_c) = make_committed_candidate( +// para_id, +// relay_parent_a, +// 1, +// vec![0x0c].into(), +// vec![0x0d].into(), +// 2, +// ); +// modified_storage +// .add_candidate(wrong_candidate_c.clone(), wrong_pvd_c.clone(), CandidateState::Seconded) +// .unwrap(); +// let scope = Scope::with_ancestors( +// relay_parent_c_info.clone(), +// base_constraints.clone(), +// pending_availability.clone(), +// 4, +// ancestors.clone(), +// ) +// .unwrap(); +// let mut chain = FragmentChain::populate(scope, &modified_storage); +// assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); +// chain.extend_from_storage(&modified_storage); +// assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); + +// // Candidate C is not even a potential candidate. +// assert_eq!( +// chain.can_add_candidate_as_potential( +// &modified_storage, +// &wrong_candidate_c.hash(), +// &wrong_candidate_c.descriptor.relay_parent, +// wrong_pvd_c.parent_head.hash(), +// Some(wrong_candidate_c.commitments.head_data.hash()), +// ), +// PotentialAddition::None +// ); +// } + +// // Parachain fork and cycles are not allowed. +// { +// // Candidate C has the same parent as candidate B. 
+// let mut modified_storage = storage.clone(); +// modified_storage.remove_candidate(&candidate_c_hash); +// let (wrong_pvd_c, wrong_candidate_c) = make_committed_candidate( +// para_id, +// relay_parent_c, +// 2, +// vec![0x0b].into(), +// vec![0x0d].into(), +// 2, +// ); +// modified_storage +// .add_candidate(wrong_candidate_c.clone(), wrong_pvd_c.clone(), CandidateState::Seconded) +// .unwrap(); +// let scope = Scope::with_ancestors( +// relay_parent_c_info.clone(), +// base_constraints.clone(), +// pending_availability.clone(), +// 4, +// ancestors.clone(), +// ) +// .unwrap(); +// let mut chain = FragmentChain::populate(scope, &modified_storage); +// // We'll either have A->B or A->C. It's not deterministic because CandidateStorage uses +// // HashSets and HashMaps. +// if chain.to_vec() == vec![candidate_a_hash, candidate_b_hash] { +// chain.extend_from_storage(&modified_storage); +// assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); +// // Candidate C is not even a potential candidate. +// assert_eq!( +// chain.can_add_candidate_as_potential( +// &modified_storage, +// &wrong_candidate_c.hash(), +// &wrong_candidate_c.descriptor.relay_parent, +// wrong_pvd_c.parent_head.hash(), +// Some(wrong_candidate_c.commitments.head_data.hash()), +// ), +// PotentialAddition::None +// ); +// } else if chain.to_vec() == vec![candidate_a_hash, wrong_candidate_c.hash()] { +// chain.extend_from_storage(&modified_storage); +// assert_eq!(chain.to_vec(), vec![candidate_a_hash, wrong_candidate_c.hash()]); +// // Candidate B is not even a potential candidate. +// assert_eq!( +// chain.can_add_candidate_as_potential( +// &modified_storage, +// &candidate_b.hash(), +// &candidate_b.descriptor.relay_parent, +// pvd_b.parent_head.hash(), +// Some(candidate_b.commitments.head_data.hash()), +// ), +// PotentialAddition::None +// ); +// } else { +// panic!("Unexpected chain: {:?}", chain.to_vec()); +// } + +// // Candidate C is a 0-length cycle. 
+// // Candidate C has the same parent as candidate B. +// let mut modified_storage = storage.clone(); +// modified_storage.remove_candidate(&candidate_c_hash); +// let (wrong_pvd_c, wrong_candidate_c) = make_committed_candidate( +// para_id, +// relay_parent_c, +// 2, +// vec![0x0c].into(), +// vec![0x0c].into(), +// 2, +// ); +// modified_storage +// .add_candidate(wrong_candidate_c.clone(), wrong_pvd_c.clone(), CandidateState::Seconded) +// .unwrap(); +// let scope = Scope::with_ancestors( +// relay_parent_c_info.clone(), +// base_constraints.clone(), +// pending_availability.clone(), +// 4, +// ancestors.clone(), +// ) +// .unwrap(); +// let mut chain = FragmentChain::populate(scope, &modified_storage); +// assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); +// chain.extend_from_storage(&modified_storage); +// assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); +// // Candidate C is not even a potential candidate. +// assert_eq!( +// chain.can_add_candidate_as_potential( +// &modified_storage, +// &wrong_candidate_c.hash(), +// &wrong_candidate_c.descriptor.relay_parent, +// wrong_pvd_c.parent_head.hash(), +// Some(wrong_candidate_c.commitments.head_data.hash()), +// ), +// PotentialAddition::None +// ); + +// // Candidate C points back to the pre-state of candidate C. 
+// let mut modified_storage = storage.clone(); +// modified_storage.remove_candidate(&candidate_c_hash); +// let (wrong_pvd_c, wrong_candidate_c) = make_committed_candidate( +// para_id, +// relay_parent_c, +// 2, +// vec![0x0c].into(), +// vec![0x0b].into(), +// 2, +// ); +// modified_storage +// .add_candidate(wrong_candidate_c.clone(), wrong_pvd_c.clone(), CandidateState::Seconded) +// .unwrap(); +// let scope = Scope::with_ancestors( +// relay_parent_c_info.clone(), +// base_constraints.clone(), +// pending_availability.clone(), +// 4, +// ancestors.clone(), +// ) +// .unwrap(); +// let mut chain = FragmentChain::populate(scope, &modified_storage); +// assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); +// chain.extend_from_storage(&modified_storage); +// assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); +// // Candidate C is not even a potential candidate. +// assert_eq!( +// chain.can_add_candidate_as_potential( +// &modified_storage, +// &wrong_candidate_c.hash(), +// &wrong_candidate_c.descriptor.relay_parent, +// wrong_pvd_c.parent_head.hash(), +// Some(wrong_candidate_c.commitments.head_data.hash()), +// ), +// PotentialAddition::None +// ); +// } + +// // Test with candidates pending availability +// { +// // Valid options +// for pending in [ +// vec![PendingAvailability { +// candidate_hash: candidate_a_hash, +// relay_parent: relay_parent_a_info.clone(), +// }], +// vec![ +// PendingAvailability { +// candidate_hash: candidate_a_hash, +// relay_parent: relay_parent_a_info.clone(), +// }, +// PendingAvailability { +// candidate_hash: candidate_b_hash, +// relay_parent: relay_parent_b_info.clone(), +// }, +// ], +// vec![ +// PendingAvailability { +// candidate_hash: candidate_a_hash, +// relay_parent: relay_parent_a_info.clone(), +// }, +// PendingAvailability { +// candidate_hash: candidate_b_hash, +// relay_parent: relay_parent_b_info.clone(), +// }, +// PendingAvailability { +// candidate_hash: 
candidate_c_hash, +// relay_parent: relay_parent_c_info.clone(), +// }, +// ], +// ] { +// let scope = Scope::with_ancestors( +// relay_parent_c_info.clone(), +// base_constraints.clone(), +// pending, +// 3, +// ancestors.clone(), +// ) +// .unwrap(); +// let mut chain = FragmentChain::populate(scope, &storage); +// assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); +// chain.extend_from_storage(&storage); +// assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); +// } + +// // Relay parents of pending availability candidates can be out of scope +// // Relay parent of candidate A is out of scope. +// let ancestors_without_a = vec![relay_parent_b_info.clone()]; +// let scope = Scope::with_ancestors( +// relay_parent_c_info.clone(), +// base_constraints.clone(), +// vec![PendingAvailability { +// candidate_hash: candidate_a_hash, +// relay_parent: relay_parent_a_info.clone(), +// }], +// 4, +// ancestors_without_a, +// ) +// .unwrap(); +// let mut chain = FragmentChain::populate(scope, &storage); +// assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); +// chain.extend_from_storage(&storage); +// assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); + +// // Even relay parents of pending availability candidates which are out of scope cannot move +// // backwards. 
+// let scope = Scope::with_ancestors( +// relay_parent_c_info.clone(), +// base_constraints.clone(), +// vec![ +// PendingAvailability { +// candidate_hash: candidate_a_hash, +// relay_parent: RelayChainBlockInfo { +// hash: relay_parent_a_info.hash, +// number: 1, +// storage_root: relay_parent_a_info.storage_root, +// }, +// }, +// PendingAvailability { +// candidate_hash: candidate_b_hash, +// relay_parent: RelayChainBlockInfo { +// hash: relay_parent_b_info.hash, +// number: 0, +// storage_root: relay_parent_b_info.storage_root, +// }, +// }, +// ], +// 4, +// vec![], +// ) +// .unwrap(); +// let mut chain = FragmentChain::populate(scope, &storage); +// assert!(chain.to_vec().is_empty()); + +// chain.extend_from_storage(&storage); +// assert!(chain.to_vec().is_empty()); +// } +// } + +// #[test] +// fn extend_from_storage_with_existing_to_vec() { +// let para_id = ParaId::from(5u32); +// let relay_parent_a = Hash::repeat_byte(1); +// let relay_parent_b = Hash::repeat_byte(2); +// let relay_parent_d = Hash::repeat_byte(3); + +// let (pvd_a, candidate_a) = make_committed_candidate( +// para_id, +// relay_parent_a, +// 0, +// vec![0x0a].into(), +// vec![0x0b].into(), +// 0, +// ); +// let candidate_a_hash = candidate_a.hash(); + +// let (pvd_b, candidate_b) = make_committed_candidate( +// para_id, +// relay_parent_b, +// 1, +// vec![0x0b].into(), +// vec![0x0c].into(), +// 1, +// ); +// let candidate_b_hash = candidate_b.hash(); + +// let (pvd_c, candidate_c) = make_committed_candidate( +// para_id, +// // Use the same relay parent number as B to test that it doesn't need to change between +// // candidates. +// relay_parent_b, +// 1, +// vec![0x0c].into(), +// vec![0x0d].into(), +// 1, +// ); +// let candidate_c_hash = candidate_c.hash(); + +// // Candidate D will never be added to the chain. 
+// let (pvd_d, candidate_d) = make_committed_candidate( +// para_id, +// relay_parent_d, +// 2, +// vec![0x0e].into(), +// vec![0x0f].into(), +// 1, +// ); + +// let relay_parent_a_info = RelayChainBlockInfo { +// number: pvd_a.relay_parent_number, +// hash: relay_parent_a, +// storage_root: pvd_a.relay_parent_storage_root, +// }; +// let relay_parent_b_info = RelayChainBlockInfo { +// number: pvd_b.relay_parent_number, +// hash: relay_parent_b, +// storage_root: pvd_b.relay_parent_storage_root, +// }; +// let relay_parent_d_info = RelayChainBlockInfo { +// number: pvd_d.relay_parent_number, +// hash: relay_parent_d, +// storage_root: pvd_d.relay_parent_storage_root, +// }; + +// let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); +// let pending_availability = Vec::new(); + +// let ancestors = vec![ +// // These need to be ordered in reverse. +// relay_parent_b_info.clone(), +// relay_parent_a_info.clone(), +// ]; + +// // Already had A and C in the storage. Introduce B, which should add both B and C to the chain +// // now. 
+// { +// let mut storage = CandidateStorage::default(); +// storage +// .add_candidate(candidate_a.clone(), pvd_a.clone(), CandidateState::Seconded) +// .unwrap(); +// storage +// .add_candidate(candidate_c.clone(), pvd_c.clone(), CandidateState::Seconded) +// .unwrap(); +// storage +// .add_candidate(candidate_d.clone(), pvd_d.clone(), CandidateState::Seconded) +// .unwrap(); + +// let scope = Scope::with_ancestors( +// relay_parent_d_info.clone(), +// base_constraints.clone(), +// pending_availability.clone(), +// 4, +// ancestors.clone(), +// ) +// .unwrap(); +// let mut chain = FragmentChain::populate(scope, &storage); +// assert_eq!(chain.to_vec(), vec![candidate_a_hash]); + +// storage +// .add_candidate(candidate_b.clone(), pvd_b.clone(), CandidateState::Seconded) +// .unwrap(); +// chain.extend_from_storage(&storage); +// assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); +// } + +// // Already had A and B in the chain. Introduce C. +// { +// let mut storage = CandidateStorage::default(); +// storage +// .add_candidate(candidate_a.clone(), pvd_a.clone(), CandidateState::Seconded) +// .unwrap(); +// storage +// .add_candidate(candidate_b.clone(), pvd_b.clone(), CandidateState::Seconded) +// .unwrap(); +// storage +// .add_candidate(candidate_d.clone(), pvd_d.clone(), CandidateState::Seconded) +// .unwrap(); + +// let scope = Scope::with_ancestors( +// relay_parent_d_info.clone(), +// base_constraints.clone(), +// pending_availability.clone(), +// 4, +// ancestors.clone(), +// ) +// .unwrap(); +// let mut chain = FragmentChain::populate(scope, &storage); +// assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); + +// storage +// .add_candidate(candidate_c.clone(), pvd_c.clone(), CandidateState::Seconded) +// .unwrap(); +// chain.extend_from_storage(&storage); +// assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); +// } +// } + #[test] -fn 
test_find_ancestor_path_and_find_backable_chain_empty_to_vec() { - let para_id = ParaId::from(5u32); +fn test_find_ancestor_path_and_find_backable_chain_empty_best_chain() { let relay_parent = Hash::repeat_byte(1); let required_parent: HeadData = vec![0xff].into(); let max_depth = 10; @@ -1138,616 +1181,598 @@ fn test_find_ancestor_path_and_find_backable_chain_empty_to_vec() { let relay_parent_info = RelayChainBlockInfo { number: 0, hash: relay_parent, storage_root: Hash::zero() }; - let scope = Scope::with_ancestors( - para_id, - relay_parent_info, - base_constraints, - vec![], - max_depth, - vec![], - ) - .unwrap(); - let chain = FragmentChain::populate(scope, &storage); - assert!(chain.to_vec().is_empty()); - - assert_eq!(chain.find_ancestor_path(Ancestors::new()), 0); - assert_eq!(chain.find_backable_chain(Ancestors::new(), 2, |_| true), vec![]); - // Invalid candidate. - let ancestors: Ancestors = [CandidateHash::default()].into_iter().collect(); - assert_eq!(chain.find_ancestor_path(ancestors.clone()), 0); - assert_eq!(chain.find_backable_chain(ancestors, 2, |_| true), vec![]); -} - -#[test] -fn test_find_ancestor_path_and_find_backable_to_vec() { - let para_id = ParaId::from(5u32); - let relay_parent = Hash::repeat_byte(1); - let required_parent: HeadData = vec![0xff].into(); - let max_depth = 5; - let relay_parent_number = 0; - let relay_parent_storage_root = Hash::repeat_byte(69); - - let mut candidates = vec![]; - - // Candidate 0 - candidates.push(make_committed_candidate( - para_id, - relay_parent, - 0, - required_parent.clone(), - vec![0].into(), - 0, - )); - // Candidate 1 - candidates.push(make_committed_candidate( - para_id, - relay_parent, - 0, - vec![0].into(), - vec![1].into(), - 0, - )); - // Candidate 2 - candidates.push(make_committed_candidate( - para_id, - relay_parent, - 0, - vec![1].into(), - vec![2].into(), - 0, - )); - // Candidate 3 - candidates.push(make_committed_candidate( - para_id, - relay_parent, - 0, - vec![2].into(), - 
vec![3].into(), - 0, - )); - // Candidate 4 - candidates.push(make_committed_candidate( - para_id, - relay_parent, - 0, - vec![3].into(), - vec![4].into(), - 0, - )); - // Candidate 5 - candidates.push(make_committed_candidate( - para_id, - relay_parent, - 0, - vec![4].into(), - vec![5].into(), - 0, - )); - - let base_constraints = make_constraints(0, vec![0], required_parent.clone()); - let mut storage = CandidateStorage::default(); - - let relay_parent_info = RelayChainBlockInfo { - number: relay_parent_number, - hash: relay_parent, - storage_root: relay_parent_storage_root, - }; - - for (pvd, candidate) in candidates.iter() { - storage - .add_candidate(candidate.clone(), pvd.clone(), CandidateState::Seconded) + let scope = + Scope::with_ancestors(relay_parent_info, base_constraints, vec![], max_depth, vec![]) .unwrap(); - } - let candidates = candidates.into_iter().map(|(_pvd, candidate)| candidate).collect::>(); - let scope = Scope::with_ancestors( - para_id, - relay_parent_info.clone(), - base_constraints.clone(), - vec![], - max_depth, - vec![], - ) - .unwrap(); - let chain = FragmentChain::populate(scope, &storage); - - assert_eq!(candidates.len(), 6); - assert_eq!(chain.to_vec().len(), 6); + let chain = FragmentChain::populate(scope, storage); + assert_eq!(chain.best_chain_len(), 0); - // No ancestors supplied. 
assert_eq!(chain.find_ancestor_path(Ancestors::new()), 0); - assert_eq!(chain.find_backable_chain(Ancestors::new(), 0, |_| true), vec![]); - assert_eq!( - chain.find_backable_chain(Ancestors::new(), 1, |_| true), - [0].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - assert_eq!( - chain.find_backable_chain(Ancestors::new(), 2, |_| true), - [0, 1].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - assert_eq!( - chain.find_backable_chain(Ancestors::new(), 5, |_| true), - [0, 1, 2, 3, 4].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - - for count in 6..10 { - assert_eq!( - chain.find_backable_chain(Ancestors::new(), count, |_| true), - [0, 1, 2, 3, 4, 5].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - } - - assert_eq!( - chain.find_backable_chain(Ancestors::new(), 7, |_| true), - [0, 1, 2, 3, 4, 5].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - assert_eq!( - chain.find_backable_chain(Ancestors::new(), 10, |_| true), - [0, 1, 2, 3, 4, 5].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - - // Ancestor which is not part of the chain. Will be ignored. + assert_eq!(chain.find_backable_chain(Ancestors::new(), 2), vec![]); + // Invalid candidate. 
let ancestors: Ancestors = [CandidateHash::default()].into_iter().collect(); assert_eq!(chain.find_ancestor_path(ancestors.clone()), 0); - assert_eq!( - chain.find_backable_chain(ancestors, 4, |_| true), - [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - let ancestors: Ancestors = - [candidates[1].hash(), CandidateHash::default()].into_iter().collect(); - assert_eq!(chain.find_ancestor_path(ancestors.clone()), 0); - assert_eq!( - chain.find_backable_chain(ancestors, 4, |_| true), - [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - let ancestors: Ancestors = - [candidates[0].hash(), CandidateHash::default()].into_iter().collect(); - assert_eq!(chain.find_ancestor_path(ancestors.clone()), 1); - assert_eq!( - chain.find_backable_chain(ancestors, 4, |_| true), - [1, 2, 3, 4].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - - // Ancestors which are part of the chain but don't form a path from root. Will be ignored. - let ancestors: Ancestors = [candidates[1].hash(), candidates[2].hash()].into_iter().collect(); - assert_eq!(chain.find_ancestor_path(ancestors.clone()), 0); - assert_eq!( - chain.find_backable_chain(ancestors, 4, |_| true), - [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - - // Valid ancestors. 
- let ancestors: Ancestors = [candidates[2].hash(), candidates[0].hash(), candidates[1].hash()] - .into_iter() - .collect(); - assert_eq!(chain.find_ancestor_path(ancestors.clone()), 3); - assert_eq!( - chain.find_backable_chain(ancestors.clone(), 2, |_| true), - [3, 4].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - for count in 3..10 { - assert_eq!( - chain.find_backable_chain(ancestors.clone(), count, |_| true), - [3, 4, 5].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - } - - // Valid ancestors with candidates which have been omitted due to timeouts - let ancestors: Ancestors = [candidates[0].hash(), candidates[2].hash()].into_iter().collect(); - assert_eq!(chain.find_ancestor_path(ancestors.clone()), 1); - assert_eq!( - chain.find_backable_chain(ancestors.clone(), 3, |_| true), - [1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - assert_eq!( - chain.find_backable_chain(ancestors.clone(), 4, |_| true), - [1, 2, 3, 4].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - for count in 5..10 { - assert_eq!( - chain.find_backable_chain(ancestors.clone(), count, |_| true), - [1, 2, 3, 4, 5].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - } - - let ancestors: Ancestors = [candidates[0].hash(), candidates[1].hash(), candidates[3].hash()] - .into_iter() - .collect(); - assert_eq!(chain.find_ancestor_path(ancestors.clone()), 2); - assert_eq!( - chain.find_backable_chain(ancestors.clone(), 4, |_| true), - [2, 3, 4, 5].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - - // Requested count is 0. - assert_eq!(chain.find_backable_chain(ancestors, 0, |_| true), vec![]); - - // Stop when we've found a candidate for which pred returns false. - let ancestors: Ancestors = [candidates[2].hash(), candidates[0].hash(), candidates[1].hash()] - .into_iter() - .collect(); - for count in 1..10 { - assert_eq!( - // Stop at 4. 
- chain.find_backable_chain(ancestors.clone(), count, |hash| hash != - &candidates[4].hash()), - [3].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - } - - // Stop when we've found a candidate which is pending availability - { - let scope = Scope::with_ancestors( - para_id, - relay_parent_info.clone(), - base_constraints, - // Mark the third candidate as pending availability - vec![PendingAvailability { - candidate_hash: candidates[3].hash(), - relay_parent: relay_parent_info, - }], - max_depth, - vec![], - ) - .unwrap(); - let chain = FragmentChain::populate(scope, &storage); - let ancestors: Ancestors = - [candidates[0].hash(), candidates[1].hash()].into_iter().collect(); - assert_eq!( - // Stop at 4. - chain.find_backable_chain(ancestors.clone(), 3, |_| true), - [2].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - } + assert_eq!(chain.find_backable_chain(ancestors, 2), vec![]); } -#[test] -fn hypothetical_membership() { - let mut storage = CandidateStorage::default(); - - let para_id = ParaId::from(5u32); - let relay_parent_a = Hash::repeat_byte(1); - - let (pvd_a, candidate_a) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0a].into(), - vec![0x0b].into(), - 0, - ); - let candidate_a_hash = candidate_a.hash(); - - let (pvd_b, candidate_b) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0b].into(), - vec![0x0c].into(), - 0, - ); - let candidate_b_hash = candidate_b.hash(); - - let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); - - let relay_parent_a_info = RelayChainBlockInfo { - number: pvd_a.relay_parent_number, - hash: relay_parent_a, - storage_root: pvd_a.relay_parent_storage_root, - }; - - let max_depth = 4; - storage.add_candidate(candidate_a, pvd_a, CandidateState::Seconded).unwrap(); - storage.add_candidate(candidate_b, pvd_b, CandidateState::Seconded).unwrap(); - let scope = Scope::with_ancestors( - para_id, - relay_parent_a_info.clone(), - 
base_constraints.clone(), - vec![], - max_depth, - vec![], - ) - .unwrap(); - let chain = FragmentChain::populate(scope, &storage); - - assert_eq!(chain.to_vec().len(), 2); - - // Check candidates which are already present - assert!(chain.hypothetical_membership( - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), - candidate_relay_parent: relay_parent_a, - candidate_para: para_id, - candidate_hash: candidate_a_hash, - }, - &storage, - )); - assert!(chain.hypothetical_membership( - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0b]).hash(), - candidate_relay_parent: relay_parent_a, - candidate_para: para_id, - candidate_hash: candidate_b_hash, - }, - &storage, - )); - - // Forks not allowed. - assert!(!chain.hypothetical_membership( - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), - candidate_relay_parent: relay_parent_a, - candidate_para: para_id, - candidate_hash: CandidateHash(Hash::repeat_byte(21)), - }, - &storage, - )); - assert!(!chain.hypothetical_membership( - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0b]).hash(), - candidate_relay_parent: relay_parent_a, - candidate_para: para_id, - candidate_hash: CandidateHash(Hash::repeat_byte(22)), - }, - &storage, - )); - - // Unknown candidate which builds on top of the current chain. - assert!(chain.hypothetical_membership( - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0c]).hash(), - candidate_relay_parent: relay_parent_a, - candidate_para: para_id, - candidate_hash: CandidateHash(Hash::repeat_byte(23)), - }, - &storage, - )); - - // Unknown unconnected candidate which may be valid. 
- assert!(chain.hypothetical_membership( - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0e]).hash(), - candidate_relay_parent: relay_parent_a, - candidate_para: para_id, - candidate_hash: CandidateHash(Hash::repeat_byte(23)), - }, - &storage, - )); - - // The number of unconnected candidates is limited (chain.len() + unconnected) <= max_depth - { - // C will be an unconnected candidate. - let (pvd_c, candidate_c) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0e].into(), - vec![0x0f].into(), - 0, - ); - let candidate_c_hash = candidate_c.hash(); - - // Add an invalid candidate in the storage. This would introduce a fork. Just to test that - // it's ignored. - let (invalid_pvd, invalid_candidate) = make_committed_candidate( - para_id, - relay_parent_a, - 1, - vec![0x0a].into(), - vec![0x0b].into(), - 0, - ); - - let scope = Scope::with_ancestors( - para_id, - relay_parent_a_info, - base_constraints, - vec![], - 2, - vec![], - ) - .unwrap(); - let mut storage = storage.clone(); - storage.add_candidate(candidate_c, pvd_c, CandidateState::Seconded).unwrap(); - - let chain = FragmentChain::populate(scope, &storage); - assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); - - storage - .add_candidate(invalid_candidate, invalid_pvd, CandidateState::Seconded) - .unwrap(); - - // Check that C is accepted as a potential unconnected candidate. - assert!(!chain.hypothetical_membership( - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0e]).hash(), - candidate_relay_parent: relay_parent_a, - candidate_hash: candidate_c_hash, - candidate_para: para_id - }, - &storage, - )); - - // Since C is already an unconnected candidate in the storage. 
- assert!(!chain.hypothetical_membership( - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0f]).hash(), - candidate_relay_parent: relay_parent_a, - candidate_para: para_id, - candidate_hash: CandidateHash(Hash::repeat_byte(23)), - }, - &storage, - )); - } -} - -#[test] -fn hypothetical_membership_stricter_on_complete_candidates() { - let storage = CandidateStorage::default(); - - let para_id = ParaId::from(5u32); - let relay_parent_a = Hash::repeat_byte(1); - - let (pvd_a, candidate_a) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0a].into(), - vec![0x0b].into(), - 1000, // watermark is illegal - ); - - let candidate_a_hash = candidate_a.hash(); - - let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); - let pending_availability = Vec::new(); - - let relay_parent_a_info = RelayChainBlockInfo { - number: pvd_a.relay_parent_number, - hash: relay_parent_a, - storage_root: pvd_a.relay_parent_storage_root, - }; - - let max_depth = 4; - let scope = Scope::with_ancestors( - para_id, - relay_parent_a_info, - base_constraints, - pending_availability, - max_depth, - vec![], - ) - .unwrap(); - let chain = FragmentChain::populate(scope, &storage); - - assert!(chain.hypothetical_membership( - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), - candidate_relay_parent: relay_parent_a, - candidate_para: para_id, - candidate_hash: candidate_a_hash, - }, - &storage, - )); - - assert!(!chain.hypothetical_membership( - HypotheticalCandidate::Complete { - receipt: Arc::new(candidate_a), - persisted_validation_data: pvd_a, - candidate_hash: candidate_a_hash, - }, - &storage, - )); -} - -#[test] -fn hypothetical_membership_with_pending_availability_in_scope() { - let mut storage = CandidateStorage::default(); - - let para_id = ParaId::from(5u32); - let relay_parent_a = Hash::repeat_byte(1); - let relay_parent_b = Hash::repeat_byte(2); - let relay_parent_c = 
Hash::repeat_byte(3); - - let (pvd_a, candidate_a) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0a].into(), - vec![0x0b].into(), - 0, - ); - let candidate_a_hash = candidate_a.hash(); - - let (pvd_b, candidate_b) = make_committed_candidate( - para_id, - relay_parent_b, - 1, - vec![0x0b].into(), - vec![0x0c].into(), - 1, - ); - - // Note that relay parent `a` is not allowed. - let base_constraints = make_constraints(1, vec![], vec![0x0a].into()); - - let relay_parent_a_info = RelayChainBlockInfo { - number: pvd_a.relay_parent_number, - hash: relay_parent_a, - storage_root: pvd_a.relay_parent_storage_root, - }; - let pending_availability = vec![PendingAvailability { - candidate_hash: candidate_a_hash, - relay_parent: relay_parent_a_info, - }]; - - let relay_parent_b_info = RelayChainBlockInfo { - number: pvd_b.relay_parent_number, - hash: relay_parent_b, - storage_root: pvd_b.relay_parent_storage_root, - }; - let relay_parent_c_info = RelayChainBlockInfo { - number: pvd_b.relay_parent_number + 1, - hash: relay_parent_c, - storage_root: Hash::zero(), - }; - - let max_depth = 4; - storage.add_candidate(candidate_a, pvd_a, CandidateState::Seconded).unwrap(); - storage.add_candidate(candidate_b, pvd_b, CandidateState::Backed).unwrap(); - storage.mark_backed(&candidate_a_hash); - - let scope = Scope::with_ancestors( - para_id, - relay_parent_c_info, - base_constraints, - pending_availability, - max_depth, - vec![relay_parent_b_info], - ) - .unwrap(); - let chain = FragmentChain::populate(scope, &storage); - - assert_eq!(chain.to_vec().len(), 2); - - let candidate_d_hash = CandidateHash(Hash::repeat_byte(0xAA)); - - assert!(chain.hypothetical_membership( - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), - candidate_relay_parent: relay_parent_a, - candidate_hash: candidate_a_hash, - candidate_para: para_id - }, - &storage, - )); - - assert!(!chain.hypothetical_membership( - 
HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), - candidate_relay_parent: relay_parent_c, - candidate_para: para_id, - candidate_hash: candidate_d_hash, - }, - &storage, - )); - - assert!(!chain.hypothetical_membership( - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0b]).hash(), - candidate_relay_parent: relay_parent_c, - candidate_para: para_id, - candidate_hash: candidate_d_hash, - }, - &storage, - )); - - assert!(chain.hypothetical_membership( - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0c]).hash(), - candidate_relay_parent: relay_parent_b, - candidate_para: para_id, - candidate_hash: candidate_d_hash, - }, - &storage, - )); -} +// #[test] +// fn test_find_ancestor_path_and_find_backable_to_vec() { +// let para_id = ParaId::from(5u32); +// let relay_parent = Hash::repeat_byte(1); +// let required_parent: HeadData = vec![0xff].into(); +// let max_depth = 5; +// let relay_parent_number = 0; +// let relay_parent_storage_root = Hash::repeat_byte(69); + +// let mut candidates = vec![]; + +// // Candidate 0 +// candidates.push(make_committed_candidate( +// para_id, +// relay_parent, +// 0, +// required_parent.clone(), +// vec![0].into(), +// 0, +// )); +// // Candidate 1 +// candidates.push(make_committed_candidate( +// para_id, +// relay_parent, +// 0, +// vec![0].into(), +// vec![1].into(), +// 0, +// )); +// // Candidate 2 +// candidates.push(make_committed_candidate( +// para_id, +// relay_parent, +// 0, +// vec![1].into(), +// vec![2].into(), +// 0, +// )); +// // Candidate 3 +// candidates.push(make_committed_candidate( +// para_id, +// relay_parent, +// 0, +// vec![2].into(), +// vec![3].into(), +// 0, +// )); +// // Candidate 4 +// candidates.push(make_committed_candidate( +// para_id, +// relay_parent, +// 0, +// vec![3].into(), +// vec![4].into(), +// 0, +// )); +// // Candidate 5 +// candidates.push(make_committed_candidate( +// 
para_id, +// relay_parent, +// 0, +// vec![4].into(), +// vec![5].into(), +// 0, +// )); + +// let base_constraints = make_constraints(0, vec![0], required_parent.clone()); +// let mut storage = CandidateStorage::default(); + +// let relay_parent_info = RelayChainBlockInfo { +// number: relay_parent_number, +// hash: relay_parent, +// storage_root: relay_parent_storage_root, +// }; + +// for (pvd, candidate) in candidates.iter() { +// storage +// .add_candidate(candidate.clone(), pvd.clone(), CandidateState::Seconded) +// .unwrap(); +// } +// let candidates = candidates.into_iter().map(|(_pvd, candidate)| candidate).collect::>(); +// let scope = Scope::with_ancestors( +// relay_parent_info.clone(), +// base_constraints.clone(), +// vec![], +// max_depth, +// vec![], +// ) +// .unwrap(); +// let chain = FragmentChain::populate(scope, &storage); + +// assert_eq!(candidates.len(), 6); +// assert_eq!(chain.to_vec().len(), 6); + +// // No ancestors supplied. +// assert_eq!(chain.find_ancestor_path(Ancestors::new()), 0); +// assert_eq!(chain.find_backable_chain(Ancestors::new(), 0, |_| true), vec![]); +// assert_eq!( +// chain.find_backable_chain(Ancestors::new(), 1, |_| true), +// [0].into_iter().map(|i| candidates[i].hash()).collect::>() +// ); +// assert_eq!( +// chain.find_backable_chain(Ancestors::new(), 2, |_| true), +// [0, 1].into_iter().map(|i| candidates[i].hash()).collect::>() +// ); +// assert_eq!( +// chain.find_backable_chain(Ancestors::new(), 5, |_| true), +// [0, 1, 2, 3, 4].into_iter().map(|i| candidates[i].hash()).collect::>() +// ); + +// for count in 6..10 { +// assert_eq!( +// chain.find_backable_chain(Ancestors::new(), count, |_| true), +// [0, 1, 2, 3, 4, 5].into_iter().map(|i| candidates[i].hash()).collect::>() +// ); +// } + +// assert_eq!( +// chain.find_backable_chain(Ancestors::new(), 7, |_| true), +// [0, 1, 2, 3, 4, 5].into_iter().map(|i| candidates[i].hash()).collect::>() +// ); +// assert_eq!( +// 
chain.find_backable_chain(Ancestors::new(), 10, |_| true), +// [0, 1, 2, 3, 4, 5].into_iter().map(|i| candidates[i].hash()).collect::>() +// ); + +// // Ancestor which is not part of the chain. Will be ignored. +// let ancestors: Ancestors = [CandidateHash::default()].into_iter().collect(); +// assert_eq!(chain.find_ancestor_path(ancestors.clone()), 0); +// assert_eq!( +// chain.find_backable_chain(ancestors, 4, |_| true), +// [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() +// ); +// let ancestors: Ancestors = +// [candidates[1].hash(), CandidateHash::default()].into_iter().collect(); +// assert_eq!(chain.find_ancestor_path(ancestors.clone()), 0); +// assert_eq!( +// chain.find_backable_chain(ancestors, 4, |_| true), +// [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() +// ); +// let ancestors: Ancestors = +// [candidates[0].hash(), CandidateHash::default()].into_iter().collect(); +// assert_eq!(chain.find_ancestor_path(ancestors.clone()), 1); +// assert_eq!( +// chain.find_backable_chain(ancestors, 4, |_| true), +// [1, 2, 3, 4].into_iter().map(|i| candidates[i].hash()).collect::>() +// ); + +// // Ancestors which are part of the chain but don't form a path from root. Will be ignored. +// let ancestors: Ancestors = [candidates[1].hash(), candidates[2].hash()].into_iter().collect(); +// assert_eq!(chain.find_ancestor_path(ancestors.clone()), 0); +// assert_eq!( +// chain.find_backable_chain(ancestors, 4, |_| true), +// [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() +// ); + +// // Valid ancestors. 
+// let ancestors: Ancestors = [candidates[2].hash(), candidates[0].hash(), candidates[1].hash()] +// .into_iter() +// .collect(); +// assert_eq!(chain.find_ancestor_path(ancestors.clone()), 3); +// assert_eq!( +// chain.find_backable_chain(ancestors.clone(), 2, |_| true), +// [3, 4].into_iter().map(|i| candidates[i].hash()).collect::>() +// ); +// for count in 3..10 { +// assert_eq!( +// chain.find_backable_chain(ancestors.clone(), count, |_| true), +// [3, 4, 5].into_iter().map(|i| candidates[i].hash()).collect::>() +// ); +// } + +// // Valid ancestors with candidates which have been omitted due to timeouts +// let ancestors: Ancestors = [candidates[0].hash(), candidates[2].hash()].into_iter().collect(); +// assert_eq!(chain.find_ancestor_path(ancestors.clone()), 1); +// assert_eq!( +// chain.find_backable_chain(ancestors.clone(), 3, |_| true), +// [1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() +// ); +// assert_eq!( +// chain.find_backable_chain(ancestors.clone(), 4, |_| true), +// [1, 2, 3, 4].into_iter().map(|i| candidates[i].hash()).collect::>() +// ); +// for count in 5..10 { +// assert_eq!( +// chain.find_backable_chain(ancestors.clone(), count, |_| true), +// [1, 2, 3, 4, 5].into_iter().map(|i| candidates[i].hash()).collect::>() +// ); +// } + +// let ancestors: Ancestors = [candidates[0].hash(), candidates[1].hash(), candidates[3].hash()] +// .into_iter() +// .collect(); +// assert_eq!(chain.find_ancestor_path(ancestors.clone()), 2); +// assert_eq!( +// chain.find_backable_chain(ancestors.clone(), 4, |_| true), +// [2, 3, 4, 5].into_iter().map(|i| candidates[i].hash()).collect::>() +// ); + +// // Requested count is 0. +// assert_eq!(chain.find_backable_chain(ancestors, 0, |_| true), vec![]); + +// // Stop when we've found a candidate for which pred returns false. 
+// let ancestors: Ancestors = [candidates[2].hash(), candidates[0].hash(), candidates[1].hash()] +// .into_iter() +// .collect(); +// for count in 1..10 { +// assert_eq!( +// // Stop at 4. +// chain.find_backable_chain(ancestors.clone(), count, |hash| hash != +// &candidates[4].hash()), +// [3].into_iter().map(|i| candidates[i].hash()).collect::>() +// ); +// } + +// // Stop when we've found a candidate which is pending availability +// { +// let scope = Scope::with_ancestors( +// relay_parent_info.clone(), +// base_constraints, +// // Mark the third candidate as pending availability +// vec![PendingAvailability { +// candidate_hash: candidates[3].hash(), +// relay_parent: relay_parent_info, +// }], +// max_depth, +// vec![], +// ) +// .unwrap(); +// let chain = FragmentChain::populate(scope, &storage); +// let ancestors: Ancestors = +// [candidates[0].hash(), candidates[1].hash()].into_iter().collect(); +// assert_eq!( +// // Stop at 4. +// chain.find_backable_chain(ancestors.clone(), 3, |_| true), +// [2].into_iter().map(|i| candidates[i].hash()).collect::>() +// ); +// } +// } + +// #[test] +// fn hypothetical_membership() { +// let mut storage = CandidateStorage::default(); + +// let para_id = ParaId::from(5u32); +// let relay_parent_a = Hash::repeat_byte(1); + +// let (pvd_a, candidate_a) = make_committed_candidate( +// para_id, +// relay_parent_a, +// 0, +// vec![0x0a].into(), +// vec![0x0b].into(), +// 0, +// ); +// let candidate_a_hash = candidate_a.hash(); + +// let (pvd_b, candidate_b) = make_committed_candidate( +// para_id, +// relay_parent_a, +// 0, +// vec![0x0b].into(), +// vec![0x0c].into(), +// 0, +// ); +// let candidate_b_hash = candidate_b.hash(); + +// let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + +// let relay_parent_a_info = RelayChainBlockInfo { +// number: pvd_a.relay_parent_number, +// hash: relay_parent_a, +// storage_root: pvd_a.relay_parent_storage_root, +// }; + +// let max_depth = 4; +// 
storage.add_candidate(candidate_a, pvd_a, CandidateState::Seconded).unwrap(); +// storage.add_candidate(candidate_b, pvd_b, CandidateState::Seconded).unwrap(); +// let scope = Scope::with_ancestors( +// relay_parent_a_info.clone(), +// base_constraints.clone(), +// vec![], +// max_depth, +// vec![], +// ) +// .unwrap(); +// let chain = FragmentChain::populate(scope, &storage); + +// assert_eq!(chain.to_vec().len(), 2); + +// // Check candidates which are already present +// assert!(chain.hypothetical_membership( +// HypotheticalCandidate::Incomplete { +// parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), +// candidate_relay_parent: relay_parent_a, +// candidate_para: para_id, +// candidate_hash: candidate_a_hash, +// }, +// &storage, +// )); +// assert!(chain.hypothetical_membership( +// HypotheticalCandidate::Incomplete { +// parent_head_data_hash: HeadData::from(vec![0x0b]).hash(), +// candidate_relay_parent: relay_parent_a, +// candidate_para: para_id, +// candidate_hash: candidate_b_hash, +// }, +// &storage, +// )); + +// // Forks not allowed. +// assert!(!chain.hypothetical_membership( +// HypotheticalCandidate::Incomplete { +// parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), +// candidate_relay_parent: relay_parent_a, +// candidate_para: para_id, +// candidate_hash: CandidateHash(Hash::repeat_byte(21)), +// }, +// &storage, +// )); +// assert!(!chain.hypothetical_membership( +// HypotheticalCandidate::Incomplete { +// parent_head_data_hash: HeadData::from(vec![0x0b]).hash(), +// candidate_relay_parent: relay_parent_a, +// candidate_para: para_id, +// candidate_hash: CandidateHash(Hash::repeat_byte(22)), +// }, +// &storage, +// )); + +// // Unknown candidate which builds on top of the current chain. 
+// assert!(chain.hypothetical_membership( +// HypotheticalCandidate::Incomplete { +// parent_head_data_hash: HeadData::from(vec![0x0c]).hash(), +// candidate_relay_parent: relay_parent_a, +// candidate_para: para_id, +// candidate_hash: CandidateHash(Hash::repeat_byte(23)), +// }, +// &storage, +// )); + +// // Unknown unconnected candidate which may be valid. +// assert!(chain.hypothetical_membership( +// HypotheticalCandidate::Incomplete { +// parent_head_data_hash: HeadData::from(vec![0x0e]).hash(), +// candidate_relay_parent: relay_parent_a, +// candidate_para: para_id, +// candidate_hash: CandidateHash(Hash::repeat_byte(23)), +// }, +// &storage, +// )); + +// // The number of unconnected candidates is limited (chain.len() + unconnected) <= max_depth +// { +// // C will be an unconnected candidate. +// let (pvd_c, candidate_c) = make_committed_candidate( +// para_id, +// relay_parent_a, +// 0, +// vec![0x0e].into(), +// vec![0x0f].into(), +// 0, +// ); +// let candidate_c_hash = candidate_c.hash(); + +// // Add an invalid candidate in the storage. This would introduce a fork. Just to test that +// // it's ignored. +// let (invalid_pvd, invalid_candidate) = make_committed_candidate( +// para_id, +// relay_parent_a, +// 1, +// vec![0x0a].into(), +// vec![0x0b].into(), +// 0, +// ); + +// let scope = Scope::with_ancestors(relay_parent_a_info, base_constraints, vec![], 2, vec![]) +// .unwrap(); +// let mut storage = storage.clone(); +// storage.add_candidate(candidate_c, pvd_c, CandidateState::Seconded).unwrap(); + +// let chain = FragmentChain::populate(scope, &storage); +// assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); + +// storage +// .add_candidate(invalid_candidate, invalid_pvd, CandidateState::Seconded) +// .unwrap(); + +// // Check that C is accepted as a potential unconnected candidate. 
+// assert!(!chain.hypothetical_membership( +// HypotheticalCandidate::Incomplete { +// parent_head_data_hash: HeadData::from(vec![0x0e]).hash(), +// candidate_relay_parent: relay_parent_a, +// candidate_hash: candidate_c_hash, +// candidate_para: para_id +// }, +// &storage, +// )); + +// // Since C is already an unconnected candidate in the storage. +// assert!(!chain.hypothetical_membership( +// HypotheticalCandidate::Incomplete { +// parent_head_data_hash: HeadData::from(vec![0x0f]).hash(), +// candidate_relay_parent: relay_parent_a, +// candidate_para: para_id, +// candidate_hash: CandidateHash(Hash::repeat_byte(23)), +// }, +// &storage, +// )); +// } +// } + +// #[test] +// fn hypothetical_membership_stricter_on_complete_candidates() { +// let storage = CandidateStorage::default(); + +// let para_id = ParaId::from(5u32); +// let relay_parent_a = Hash::repeat_byte(1); + +// let (pvd_a, candidate_a) = make_committed_candidate( +// para_id, +// relay_parent_a, +// 0, +// vec![0x0a].into(), +// vec![0x0b].into(), +// 1000, // watermark is illegal +// ); + +// let candidate_a_hash = candidate_a.hash(); + +// let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); +// let pending_availability = Vec::new(); + +// let relay_parent_a_info = RelayChainBlockInfo { +// number: pvd_a.relay_parent_number, +// hash: relay_parent_a, +// storage_root: pvd_a.relay_parent_storage_root, +// }; + +// let max_depth = 4; +// let scope = Scope::with_ancestors( +// relay_parent_a_info, +// base_constraints, +// pending_availability, +// max_depth, +// vec![], +// ) +// .unwrap(); +// let chain = FragmentChain::populate(scope, &storage); + +// assert!(chain.hypothetical_membership( +// HypotheticalCandidate::Incomplete { +// parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), +// candidate_relay_parent: relay_parent_a, +// candidate_para: para_id, +// candidate_hash: candidate_a_hash, +// }, +// &storage, +// )); + +// assert!(!chain.hypothetical_membership( +// 
HypotheticalCandidate::Complete { +// receipt: Arc::new(candidate_a), +// persisted_validation_data: pvd_a, +// candidate_hash: candidate_a_hash, +// }, +// &storage, +// )); +// } + +// #[test] +// fn hypothetical_membership_with_pending_availability_in_scope() { +// let mut storage = CandidateStorage::default(); + +// let para_id = ParaId::from(5u32); +// let relay_parent_a = Hash::repeat_byte(1); +// let relay_parent_b = Hash::repeat_byte(2); +// let relay_parent_c = Hash::repeat_byte(3); + +// let (pvd_a, candidate_a) = make_committed_candidate( +// para_id, +// relay_parent_a, +// 0, +// vec![0x0a].into(), +// vec![0x0b].into(), +// 0, +// ); +// let candidate_a_hash = candidate_a.hash(); + +// let (pvd_b, candidate_b) = make_committed_candidate( +// para_id, +// relay_parent_b, +// 1, +// vec![0x0b].into(), +// vec![0x0c].into(), +// 1, +// ); + +// // Note that relay parent `a` is not allowed. +// let base_constraints = make_constraints(1, vec![], vec![0x0a].into()); + +// let relay_parent_a_info = RelayChainBlockInfo { +// number: pvd_a.relay_parent_number, +// hash: relay_parent_a, +// storage_root: pvd_a.relay_parent_storage_root, +// }; +// let pending_availability = vec![PendingAvailability { +// candidate_hash: candidate_a_hash, +// relay_parent: relay_parent_a_info, +// }]; + +// let relay_parent_b_info = RelayChainBlockInfo { +// number: pvd_b.relay_parent_number, +// hash: relay_parent_b, +// storage_root: pvd_b.relay_parent_storage_root, +// }; +// let relay_parent_c_info = RelayChainBlockInfo { +// number: pvd_b.relay_parent_number + 1, +// hash: relay_parent_c, +// storage_root: Hash::zero(), +// }; + +// let max_depth = 4; +// storage.add_candidate(candidate_a, pvd_a, CandidateState::Seconded).unwrap(); +// storage.add_candidate(candidate_b, pvd_b, CandidateState::Backed).unwrap(); +// storage.mark_backed(&candidate_a_hash); + +// let scope = Scope::with_ancestors( +// relay_parent_c_info, +// base_constraints, +// pending_availability, +// 
max_depth, +// vec![relay_parent_b_info], +// ) +// .unwrap(); +// let chain = FragmentChain::populate(scope, &storage); + +// assert_eq!(chain.to_vec().len(), 2); + +// let candidate_d_hash = CandidateHash(Hash::repeat_byte(0xAA)); + +// assert!(chain.hypothetical_membership( +// HypotheticalCandidate::Incomplete { +// parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), +// candidate_relay_parent: relay_parent_a, +// candidate_hash: candidate_a_hash, +// candidate_para: para_id +// }, +// &storage, +// )); + +// assert!(!chain.hypothetical_membership( +// HypotheticalCandidate::Incomplete { +// parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), +// candidate_relay_parent: relay_parent_c, +// candidate_para: para_id, +// candidate_hash: candidate_d_hash, +// }, +// &storage, +// )); + +// assert!(!chain.hypothetical_membership( +// HypotheticalCandidate::Incomplete { +// parent_head_data_hash: HeadData::from(vec![0x0b]).hash(), +// candidate_relay_parent: relay_parent_c, +// candidate_para: para_id, +// candidate_hash: candidate_d_hash, +// }, +// &storage, +// )); + +// assert!(chain.hypothetical_membership( +// HypotheticalCandidate::Incomplete { +// parent_head_data_hash: HeadData::from(vec![0x0c]).hash(), +// candidate_relay_parent: relay_parent_b, +// candidate_para: para_id, +// candidate_hash: candidate_d_hash, +// }, +// &storage, +// )); +// } diff --git a/polkadot/node/core/prospective-parachains/src/tests.rs b/polkadot/node/core/prospective-parachains/src/tests.rs index 221fbf4c4e60..85ae4926a491 100644 --- a/polkadot/node/core/prospective-parachains/src/tests.rs +++ b/polkadot/node/core/prospective-parachains/src/tests.rs @@ -542,7 +542,6 @@ fn should_do_no_work_if_async_backing_disabled_for_leaf() { }); assert!(view.active_leaves.is_empty()); - assert!(view.candidate_storage.is_empty()); } // Send some candidates and make sure all are found: @@ -718,10 +717,6 @@ fn introduce_candidates_basic() { }); assert_eq!(view.active_leaves.len(), 3); 
- assert_eq!(view.candidate_storage.len(), 2); - // Two parents and two candidates per para. - assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (2, 2)); - assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (2, 2)); } #[test] @@ -781,9 +776,6 @@ fn introduce_candidate_multiple_times() { }); assert_eq!(view.active_leaves.len(), 1); - assert_eq!(view.candidate_storage.len(), 2); - assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (1, 1)); - assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (0, 0)); } #[test] @@ -863,9 +855,6 @@ fn fragment_chain_length_is_bounded() { }); assert_eq!(view.active_leaves.len(), 1); - assert_eq!(view.candidate_storage.len(), 2); - assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (2, 2)); - assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (0, 0)); } #[test] @@ -949,9 +938,6 @@ fn unconnected_candidate_count_is_bounded() { }); assert_eq!(view.active_leaves.len(), 1); - assert_eq!(view.candidate_storage.len(), 2); - assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (1, 1)); - assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (0, 0)); } // Send some candidates, check if the candidate won't be found once its relay parent leaves the @@ -1178,7 +1164,6 @@ fn introduce_candidate_parent_leaving_view() { }); assert_eq!(view.active_leaves.len(), 0); - assert_eq!(view.candidate_storage.len(), 0); } // Introduce a candidate to multiple forks, see how the membership is returned. 
@@ -1249,9 +1234,6 @@ fn introduce_candidate_on_multiple_forks() { }); assert_eq!(view.active_leaves.len(), 2); - assert_eq!(view.candidate_storage.len(), 2); - assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (1, 1)); - assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (0, 0)); } #[test] @@ -1351,9 +1333,6 @@ fn unconnected_candidates_become_connected() { }); assert_eq!(view.active_leaves.len(), 1); - assert_eq!(view.candidate_storage.len(), 2); - assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (4, 4)); - assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (0, 0)); } // Backs some candidates and tests `GetBackableCandidates` when requesting a single candidate. @@ -1490,10 +1469,6 @@ fn check_backable_query_single_candidate() { }); assert_eq!(view.active_leaves.len(), 1); - assert_eq!(view.candidate_storage.len(), 2); - // Two parents and two candidates on para 1. - assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (2, 2)); - assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (0, 0)); } // Backs some candidates and tests `GetBackableCandidates` when requesting a multiple candidates. @@ -1786,10 +1761,6 @@ fn check_backable_query_multiple_candidates() { }); assert_eq!(view.active_leaves.len(), 1); - assert_eq!(view.candidate_storage.len(), 2); - // 4 candidates on para 1. - assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (4, 4)); - assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (0, 0)); } // Test hypothetical membership query. 
@@ -1954,8 +1925,6 @@ fn check_hypothetical_membership_query() { }); assert_eq!(view.active_leaves.len(), 2); - assert_eq!(view.candidate_storage.len(), 2); - assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (2, 2)); } #[test] @@ -2080,7 +2049,6 @@ fn check_pvd_query() { }); assert_eq!(view.active_leaves.len(), 1); - assert_eq!(view.candidate_storage.len(), 2); } // Test simultaneously activating and deactivating leaves, and simultaneously deactivating @@ -2192,7 +2160,6 @@ fn correctly_updates_leaves(#[case] runtime_api_version: u32) { }); assert_eq!(view.active_leaves.len(), 0); - assert_eq!(view.candidate_storage.len(), 0); } #[test] From 386273ee8552ac15dc1a1f475f1885513ed2c2dc Mon Sep 17 00:00:00 2001 From: alindima Date: Fri, 12 Jul 2024 15:30:13 +0300 Subject: [PATCH 17/56] some unit tests --- .../src/fragment_chain/mod.rs | 52 +- .../src/fragment_chain/tests.rs | 1060 +++++------------ 2 files changed, 305 insertions(+), 807 deletions(-) diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs index 1727ca4c61cd..2a876613f902 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs @@ -154,8 +154,6 @@ pub(crate) enum Error { MultiplePaths, #[error("Attempting to directly introduce a Backed candidate. 
It should first be introduced as Seconded")] IntroduceBackedCandidate, - #[error("Current backed candidate chain reached the `max_candidate_depth + 1` limit")] - ChainTooLong, #[error("Relay parent {0:?} of the candidate precedes the relay parent {0:?} of a pending availability candidate")] RelayParentPrecedesCandidatePendingAvailability(Hash, Hash), #[error("Candidate would introduce a fork with a pending availability candidate: {0:?}")] @@ -968,34 +966,27 @@ impl FragmentChain { // Try seeing if the parent candidate is in the current chain or if it is the latest // included candidate. If so, get the constraints the candidate must satisfy. - let constraints = if let Some(parent_candidate) = - self.best_chain.by_output_head.get(&parent_head_hash) - { - let Some(parent_candidate_index) = - self.best_chain.chain.iter().position(|c| &c.candidate_hash == parent_candidate) - else { - // Should never really happen. - return Err(Error::ParentCandidateNotFound) - }; - - // We already have enough candidates in this chain. - if parent_candidate_index >= self.scope.max_depth { - return Err(Error::ChainTooLong) - } + let constraints = + if let Some(parent_candidate) = self.best_chain.by_output_head.get(&parent_head_hash) { + let Some(parent_candidate) = + self.best_chain.chain.iter().find(|c| &c.candidate_hash == parent_candidate) + else { + // Should never really happen. + return Err(Error::ParentCandidateNotFound) + }; - let parent_candidate = &self.best_chain.chain[parent_candidate_index]; - self.scope - .base_constraints - .apply_modifications(&parent_candidate.cumulative_modifications) - .map_err(Error::ComputeConstraints)? - } else if self.scope.base_constraints.required_parent.hash() == parent_head_hash { - // It builds on the latest included candidate. - self.scope.base_constraints.clone() - } else { - // If the parent is not yet part of the chain, there's nothing else we can check for - // now. 
- return Ok(()) - }; + self.scope + .base_constraints + .apply_modifications(&parent_candidate.cumulative_modifications) + .map_err(Error::ComputeConstraints)? + } else if self.scope.base_constraints.required_parent.hash() == parent_head_hash { + // It builds on the latest included candidate. + self.scope.base_constraints.clone() + } else { + // If the parent is not yet part of the chain, there's nothing else we can check for + // now. + return Ok(()) + }; // Check for cycles or invalid tree transitions. if let Some(ref output_head_hash) = candidate.output_head_data_hash() { @@ -1037,6 +1028,9 @@ impl FragmentChain { // Start out with the candidates in the chain. They are all valid candidates. let mut queue: VecDeque<_> = self.best_chain.chain.iter().map(|c| (c.parent_head_data_hash, true)).collect(); + if queue.is_empty() { + queue.push_front((self.scope.base_constraints.required_parent.hash(), true)); + } // To make sure that cycles don't make us loop forever, keep track of the visited parent // heads. let mut visited = HashSet::new(); diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs index c7b7538e7bb7..701743ca5768 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs @@ -56,7 +56,7 @@ fn make_committed_candidate( let persisted_validation_data = PersistedValidationData { parent_head, relay_parent_number, - relay_parent_storage_root: Hash::repeat_byte(69), + relay_parent_storage_root: Hash::zero(), max_pov_size: 1_000_000, }; @@ -376,797 +376,301 @@ fn candidate_storage_methods() { ); } -// #[test] -// fn populate_and_extend_from_storage_empty() { -// // Empty chain and empty storage. 
-// let storage = CandidateStorage::default(); -// let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); -// let pending_availability = Vec::new(); - -// let scope = Scope::with_ancestors( -// RelayChainBlockInfo { -// number: 1, -// hash: Hash::repeat_byte(1), -// storage_root: Hash::repeat_byte(2), -// }, -// base_constraints, -// pending_availability, -// 4, -// vec![], -// ) -// .unwrap(); -// let mut chain = FragmentChain::populate(scope, &storage); -// assert!(chain.to_vec().is_empty()); - -// chain.extend_from_storage(&storage); -// assert!(chain.to_vec().is_empty()); -// } - -// #[test] -// fn populate_and_extend_from_storage_with_existing_empty_to_vec() { -// let mut storage = CandidateStorage::default(); - -// let para_id = ParaId::from(5u32); -// let relay_parent_a = Hash::repeat_byte(1); -// let relay_parent_b = Hash::repeat_byte(2); -// let relay_parent_c = Hash::repeat_byte(3); - -// let (pvd_a, candidate_a) = make_committed_candidate( -// para_id, -// relay_parent_a, -// 0, -// vec![0x0a].into(), -// vec![0x0b].into(), -// 0, -// ); -// let candidate_a_hash = candidate_a.hash(); - -// let (pvd_b, candidate_b) = make_committed_candidate( -// para_id, -// relay_parent_b, -// 1, -// vec![0x0b].into(), -// vec![0x0c].into(), -// 1, -// ); -// let candidate_b_hash = candidate_b.hash(); - -// let (pvd_c, candidate_c) = make_committed_candidate( -// para_id, -// relay_parent_c, -// 2, -// vec![0x0c].into(), -// vec![0x0d].into(), -// 2, -// ); -// let candidate_c_hash = candidate_c.hash(); - -// let relay_parent_a_info = RelayChainBlockInfo { -// number: pvd_a.relay_parent_number, -// hash: relay_parent_a, -// storage_root: pvd_a.relay_parent_storage_root, -// }; -// let relay_parent_b_info = RelayChainBlockInfo { -// number: pvd_b.relay_parent_number, -// hash: relay_parent_b, -// storage_root: pvd_b.relay_parent_storage_root, -// }; -// let relay_parent_c_info = RelayChainBlockInfo { -// number: pvd_c.relay_parent_number, -// hash: 
relay_parent_c, -// storage_root: pvd_c.relay_parent_storage_root, -// }; - -// let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); -// let pending_availability = Vec::new(); - -// let ancestors = vec![ -// // These need to be ordered in reverse. -// relay_parent_b_info.clone(), -// relay_parent_a_info.clone(), -// ]; - -// storage -// .add_candidate(candidate_a.clone(), pvd_a.clone(), CandidateState::Seconded) -// .unwrap(); -// storage -// .add_candidate(candidate_b.clone(), pvd_b.clone(), CandidateState::Backed) -// .unwrap(); -// storage -// .add_candidate(candidate_c.clone(), pvd_c.clone(), CandidateState::Backed) -// .unwrap(); - -// // Candidate A doesn't adhere to the base constraints. -// { -// for wrong_constraints in [ -// // Different required parent -// make_constraints(0, vec![0], vec![0x0e].into()), -// // Min relay parent number is wrong -// make_constraints(1, vec![0], vec![0x0a].into()), -// ] { -// let scope = Scope::with_ancestors( -// relay_parent_c_info.clone(), -// wrong_constraints.clone(), -// pending_availability.clone(), -// 4, -// ancestors.clone(), -// ) -// .unwrap(); -// let mut chain = FragmentChain::populate(scope, &storage); - -// assert!(chain.to_vec().is_empty()); - -// chain.extend_from_storage(&storage); -// assert!(chain.to_vec().is_empty()); - -// // If the min relay parent number is wrong, candidate A can never become valid. -// // Otherwise, if only the required parent doesn't match, candidate A is still a -// // potential candidate. 
-// if wrong_constraints.min_relay_parent_number == 1 { -// assert_eq!( -// chain.can_add_candidate_as_potential( -// &storage, -// &candidate_a.hash(), -// &candidate_a.descriptor.relay_parent, -// pvd_a.parent_head.hash(), -// Some(candidate_a.commitments.head_data.hash()), -// ), -// PotentialAddition::None -// ); -// } else { -// assert_eq!( -// chain.can_add_candidate_as_potential( -// &storage, -// &candidate_a.hash(), -// &candidate_a.descriptor.relay_parent, -// pvd_a.parent_head.hash(), -// Some(candidate_a.commitments.head_data.hash()), -// ), -// PotentialAddition::Anyhow -// ); -// } - -// // All other candidates can always be potential candidates. -// for (candidate, pvd) in -// [(candidate_b.clone(), pvd_b.clone()), (candidate_c.clone(), pvd_c.clone())] -// { -// assert_eq!( -// chain.can_add_candidate_as_potential( -// &storage, -// &candidate.hash(), -// &candidate.descriptor.relay_parent, -// pvd.parent_head.hash(), -// Some(candidate.commitments.head_data.hash()), -// ), -// PotentialAddition::Anyhow -// ); -// } -// } -// } - -// // Various max depths. -// { -// // depth is 0, will only allow 1 candidate -// let scope = Scope::with_ancestors( -// relay_parent_c_info.clone(), -// base_constraints.clone(), -// pending_availability.clone(), -// 0, -// ancestors.clone(), -// ) -// .unwrap(); -// // Before populating the chain, all candidates are potential candidates. 
However, they can -// // only be added as connected candidates, because only one candidates is allowed by max -// // depth -// let chain = FragmentChain::populate(scope.clone(), &CandidateStorage::default()); -// for (candidate, pvd) in [ -// (candidate_a.clone(), pvd_a.clone()), -// (candidate_b.clone(), pvd_b.clone()), -// (candidate_c.clone(), pvd_c.clone()), -// ] { -// assert_eq!( -// chain.can_add_candidate_as_potential( -// &CandidateStorage::default(), -// &candidate.hash(), -// &candidate.descriptor.relay_parent, -// pvd.parent_head.hash(), -// Some(candidate.commitments.head_data.hash()), -// ), -// PotentialAddition::IfConnected -// ); -// } -// let mut chain = FragmentChain::populate(scope, &storage); -// assert_eq!(chain.to_vec(), vec![candidate_a_hash]); -// chain.extend_from_storage(&storage); -// assert_eq!(chain.to_vec(), vec![candidate_a_hash]); -// // since depth is maxed out, we can't add more potential candidates -// // candidate A is no longer a potential candidate because it's already present. -// for (candidate, pvd) in [ -// (candidate_a.clone(), pvd_a.clone()), -// (candidate_b.clone(), pvd_b.clone()), -// (candidate_c.clone(), pvd_c.clone()), -// ] { -// assert_eq!( -// chain.can_add_candidate_as_potential( -// &storage, -// &candidate.hash(), -// &candidate.descriptor.relay_parent, -// pvd.parent_head.hash(), -// Some(candidate.commitments.head_data.hash()), -// ), -// PotentialAddition::None -// ); -// } - -// // depth is 1, allows two candidates -// let scope = Scope::with_ancestors( -// relay_parent_c_info.clone(), -// base_constraints.clone(), -// pending_availability.clone(), -// 1, -// ancestors.clone(), -// ) -// .unwrap(); -// // Before populating the chain, all candidates can be added as potential. 
-// let mut modified_storage = CandidateStorage::default(); -// let chain = FragmentChain::populate(scope.clone(), &modified_storage); -// for (candidate, pvd) in [ -// (candidate_a.clone(), pvd_a.clone()), -// (candidate_b.clone(), pvd_b.clone()), -// (candidate_c.clone(), pvd_c.clone()), -// ] { -// assert_eq!( -// chain.can_add_candidate_as_potential( -// &modified_storage, -// &candidate.hash(), -// &candidate.descriptor.relay_parent, -// pvd.parent_head.hash(), -// Some(candidate.commitments.head_data.hash()), -// ), -// PotentialAddition::Anyhow -// ); -// } -// // Add an unconnected candidate. We now should only allow a Connected candidate, because max -// // depth only allows one more candidate. -// modified_storage -// .add_candidate(candidate_b.clone(), pvd_b.clone(), CandidateState::Seconded) -// .unwrap(); -// let chain = FragmentChain::populate(scope.clone(), &modified_storage); -// for (candidate, pvd) in -// [(candidate_a.clone(), pvd_a.clone()), (candidate_c.clone(), pvd_c.clone())] -// { -// assert_eq!( -// chain.can_add_candidate_as_potential( -// &modified_storage, -// &candidate.hash(), -// &candidate.descriptor.relay_parent, -// pvd.parent_head.hash(), -// Some(candidate.commitments.head_data.hash()), -// ), -// PotentialAddition::IfConnected -// ); -// } - -// // Now try populating from all candidates. -// let mut chain = FragmentChain::populate(scope, &storage); -// assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); -// chain.extend_from_storage(&storage); -// assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); -// // since depth is maxed out, we can't add more potential candidates -// // candidate A and B are no longer a potential candidate because they're already present. 
-// for (candidate, pvd) in [ -// (candidate_a.clone(), pvd_a.clone()), -// (candidate_b.clone(), pvd_b.clone()), -// (candidate_c.clone(), pvd_c.clone()), -// ] { -// assert_eq!( -// chain.can_add_candidate_as_potential( -// &storage, -// &candidate.hash(), -// &candidate.descriptor.relay_parent, -// pvd.parent_head.hash(), -// Some(candidate.commitments.head_data.hash()), -// ), -// PotentialAddition::None -// ); -// } - -// // depths larger than 2, allows all candidates -// for depth in 2..6 { -// let scope = Scope::with_ancestors( -// relay_parent_c_info.clone(), -// base_constraints.clone(), -// pending_availability.clone(), -// depth, -// ancestors.clone(), -// ) -// .unwrap(); -// let mut chain = FragmentChain::populate(scope, &storage); -// assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); -// chain.extend_from_storage(&storage); -// assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); -// // Candidates are no longer potential candidates because they're already part of the -// // chain. -// for (candidate, pvd) in [ -// (candidate_a.clone(), pvd_a.clone()), -// (candidate_b.clone(), pvd_b.clone()), -// (candidate_c.clone(), pvd_c.clone()), -// ] { -// assert_eq!( -// chain.can_add_candidate_as_potential( -// &storage, -// &candidate.hash(), -// &candidate.descriptor.relay_parent, -// pvd.parent_head.hash(), -// Some(candidate.commitments.head_data.hash()), -// ), -// PotentialAddition::None -// ); -// } -// } -// } - -// // Wrong relay parents -// { -// // Candidates A has relay parent out of scope. 
-// let ancestors_without_a = vec![relay_parent_b_info.clone()]; -// let scope = Scope::with_ancestors( -// relay_parent_c_info.clone(), -// base_constraints.clone(), -// pending_availability.clone(), -// 4, -// ancestors_without_a, -// ) -// .unwrap(); - -// let mut chain = FragmentChain::populate(scope, &storage); -// assert!(chain.to_vec().is_empty()); - -// chain.extend_from_storage(&storage); -// assert!(chain.to_vec().is_empty()); - -// // Candidate A is not a potential candidate, but candidates B and C still are. -// assert_eq!( -// chain.can_add_candidate_as_potential( -// &storage, -// &candidate_a.hash(), -// &candidate_a.descriptor.relay_parent, -// pvd_a.parent_head.hash(), -// Some(candidate_a.commitments.head_data.hash()), -// ), -// PotentialAddition::None -// ); -// for (candidate, pvd) in -// [(candidate_b.clone(), pvd_b.clone()), (candidate_c.clone(), pvd_c.clone())] -// { -// assert_eq!( -// chain.can_add_candidate_as_potential( -// &storage, -// &candidate.hash(), -// &candidate.descriptor.relay_parent, -// pvd.parent_head.hash(), -// Some(candidate.commitments.head_data.hash()), -// ), -// PotentialAddition::Anyhow -// ); -// } - -// // Candidate C has the same relay parent as candidate A's parent. 
Relay parent not allowed -// // to move backwards -// let mut modified_storage = storage.clone(); -// modified_storage.remove_candidate(&candidate_c_hash); -// let (wrong_pvd_c, wrong_candidate_c) = make_committed_candidate( -// para_id, -// relay_parent_a, -// 1, -// vec![0x0c].into(), -// vec![0x0d].into(), -// 2, -// ); -// modified_storage -// .add_candidate(wrong_candidate_c.clone(), wrong_pvd_c.clone(), CandidateState::Seconded) -// .unwrap(); -// let scope = Scope::with_ancestors( -// relay_parent_c_info.clone(), -// base_constraints.clone(), -// pending_availability.clone(), -// 4, -// ancestors.clone(), -// ) -// .unwrap(); -// let mut chain = FragmentChain::populate(scope, &modified_storage); -// assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); -// chain.extend_from_storage(&modified_storage); -// assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); - -// // Candidate C is not even a potential candidate. -// assert_eq!( -// chain.can_add_candidate_as_potential( -// &modified_storage, -// &wrong_candidate_c.hash(), -// &wrong_candidate_c.descriptor.relay_parent, -// wrong_pvd_c.parent_head.hash(), -// Some(wrong_candidate_c.commitments.head_data.hash()), -// ), -// PotentialAddition::None -// ); -// } - -// // Parachain fork and cycles are not allowed. -// { -// // Candidate C has the same parent as candidate B. 
-// let mut modified_storage = storage.clone(); -// modified_storage.remove_candidate(&candidate_c_hash); -// let (wrong_pvd_c, wrong_candidate_c) = make_committed_candidate( -// para_id, -// relay_parent_c, -// 2, -// vec![0x0b].into(), -// vec![0x0d].into(), -// 2, -// ); -// modified_storage -// .add_candidate(wrong_candidate_c.clone(), wrong_pvd_c.clone(), CandidateState::Seconded) -// .unwrap(); -// let scope = Scope::with_ancestors( -// relay_parent_c_info.clone(), -// base_constraints.clone(), -// pending_availability.clone(), -// 4, -// ancestors.clone(), -// ) -// .unwrap(); -// let mut chain = FragmentChain::populate(scope, &modified_storage); -// // We'll either have A->B or A->C. It's not deterministic because CandidateStorage uses -// // HashSets and HashMaps. -// if chain.to_vec() == vec![candidate_a_hash, candidate_b_hash] { -// chain.extend_from_storage(&modified_storage); -// assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); -// // Candidate C is not even a potential candidate. -// assert_eq!( -// chain.can_add_candidate_as_potential( -// &modified_storage, -// &wrong_candidate_c.hash(), -// &wrong_candidate_c.descriptor.relay_parent, -// wrong_pvd_c.parent_head.hash(), -// Some(wrong_candidate_c.commitments.head_data.hash()), -// ), -// PotentialAddition::None -// ); -// } else if chain.to_vec() == vec![candidate_a_hash, wrong_candidate_c.hash()] { -// chain.extend_from_storage(&modified_storage); -// assert_eq!(chain.to_vec(), vec![candidate_a_hash, wrong_candidate_c.hash()]); -// // Candidate B is not even a potential candidate. -// assert_eq!( -// chain.can_add_candidate_as_potential( -// &modified_storage, -// &candidate_b.hash(), -// &candidate_b.descriptor.relay_parent, -// pvd_b.parent_head.hash(), -// Some(candidate_b.commitments.head_data.hash()), -// ), -// PotentialAddition::None -// ); -// } else { -// panic!("Unexpected chain: {:?}", chain.to_vec()); -// } - -// // Candidate C is a 0-length cycle. 
-// // Candidate C has the same parent as candidate B. -// let mut modified_storage = storage.clone(); -// modified_storage.remove_candidate(&candidate_c_hash); -// let (wrong_pvd_c, wrong_candidate_c) = make_committed_candidate( -// para_id, -// relay_parent_c, -// 2, -// vec![0x0c].into(), -// vec![0x0c].into(), -// 2, -// ); -// modified_storage -// .add_candidate(wrong_candidate_c.clone(), wrong_pvd_c.clone(), CandidateState::Seconded) -// .unwrap(); -// let scope = Scope::with_ancestors( -// relay_parent_c_info.clone(), -// base_constraints.clone(), -// pending_availability.clone(), -// 4, -// ancestors.clone(), -// ) -// .unwrap(); -// let mut chain = FragmentChain::populate(scope, &modified_storage); -// assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); -// chain.extend_from_storage(&modified_storage); -// assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); -// // Candidate C is not even a potential candidate. -// assert_eq!( -// chain.can_add_candidate_as_potential( -// &modified_storage, -// &wrong_candidate_c.hash(), -// &wrong_candidate_c.descriptor.relay_parent, -// wrong_pvd_c.parent_head.hash(), -// Some(wrong_candidate_c.commitments.head_data.hash()), -// ), -// PotentialAddition::None -// ); - -// // Candidate C points back to the pre-state of candidate C. 
-// let mut modified_storage = storage.clone(); -// modified_storage.remove_candidate(&candidate_c_hash); -// let (wrong_pvd_c, wrong_candidate_c) = make_committed_candidate( -// para_id, -// relay_parent_c, -// 2, -// vec![0x0c].into(), -// vec![0x0b].into(), -// 2, -// ); -// modified_storage -// .add_candidate(wrong_candidate_c.clone(), wrong_pvd_c.clone(), CandidateState::Seconded) -// .unwrap(); -// let scope = Scope::with_ancestors( -// relay_parent_c_info.clone(), -// base_constraints.clone(), -// pending_availability.clone(), -// 4, -// ancestors.clone(), -// ) -// .unwrap(); -// let mut chain = FragmentChain::populate(scope, &modified_storage); -// assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); -// chain.extend_from_storage(&modified_storage); -// assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); -// // Candidate C is not even a potential candidate. -// assert_eq!( -// chain.can_add_candidate_as_potential( -// &modified_storage, -// &wrong_candidate_c.hash(), -// &wrong_candidate_c.descriptor.relay_parent, -// wrong_pvd_c.parent_head.hash(), -// Some(wrong_candidate_c.commitments.head_data.hash()), -// ), -// PotentialAddition::None -// ); -// } - -// // Test with candidates pending availability -// { -// // Valid options -// for pending in [ -// vec![PendingAvailability { -// candidate_hash: candidate_a_hash, -// relay_parent: relay_parent_a_info.clone(), -// }], -// vec![ -// PendingAvailability { -// candidate_hash: candidate_a_hash, -// relay_parent: relay_parent_a_info.clone(), -// }, -// PendingAvailability { -// candidate_hash: candidate_b_hash, -// relay_parent: relay_parent_b_info.clone(), -// }, -// ], -// vec![ -// PendingAvailability { -// candidate_hash: candidate_a_hash, -// relay_parent: relay_parent_a_info.clone(), -// }, -// PendingAvailability { -// candidate_hash: candidate_b_hash, -// relay_parent: relay_parent_b_info.clone(), -// }, -// PendingAvailability { -// candidate_hash: 
candidate_c_hash, -// relay_parent: relay_parent_c_info.clone(), -// }, -// ], -// ] { -// let scope = Scope::with_ancestors( -// relay_parent_c_info.clone(), -// base_constraints.clone(), -// pending, -// 3, -// ancestors.clone(), -// ) -// .unwrap(); -// let mut chain = FragmentChain::populate(scope, &storage); -// assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); -// chain.extend_from_storage(&storage); -// assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); -// } - -// // Relay parents of pending availability candidates can be out of scope -// // Relay parent of candidate A is out of scope. -// let ancestors_without_a = vec![relay_parent_b_info.clone()]; -// let scope = Scope::with_ancestors( -// relay_parent_c_info.clone(), -// base_constraints.clone(), -// vec![PendingAvailability { -// candidate_hash: candidate_a_hash, -// relay_parent: relay_parent_a_info.clone(), -// }], -// 4, -// ancestors_without_a, -// ) -// .unwrap(); -// let mut chain = FragmentChain::populate(scope, &storage); -// assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); -// chain.extend_from_storage(&storage); -// assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); - -// // Even relay parents of pending availability candidates which are out of scope cannot move -// // backwards. 
-// let scope = Scope::with_ancestors( -// relay_parent_c_info.clone(), -// base_constraints.clone(), -// vec![ -// PendingAvailability { -// candidate_hash: candidate_a_hash, -// relay_parent: RelayChainBlockInfo { -// hash: relay_parent_a_info.hash, -// number: 1, -// storage_root: relay_parent_a_info.storage_root, -// }, -// }, -// PendingAvailability { -// candidate_hash: candidate_b_hash, -// relay_parent: RelayChainBlockInfo { -// hash: relay_parent_b_info.hash, -// number: 0, -// storage_root: relay_parent_b_info.storage_root, -// }, -// }, -// ], -// 4, -// vec![], -// ) -// .unwrap(); -// let mut chain = FragmentChain::populate(scope, &storage); -// assert!(chain.to_vec().is_empty()); - -// chain.extend_from_storage(&storage); -// assert!(chain.to_vec().is_empty()); -// } -// } +#[test] +fn populate_empty() { + // Empty chain and empty storage. + let storage = CandidateStorage::default(); + let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); -// #[test] -// fn extend_from_storage_with_existing_to_vec() { -// let para_id = ParaId::from(5u32); -// let relay_parent_a = Hash::repeat_byte(1); -// let relay_parent_b = Hash::repeat_byte(2); -// let relay_parent_d = Hash::repeat_byte(3); + let scope = Scope::with_ancestors( + RelayChainBlockInfo { + number: 1, + hash: Hash::repeat_byte(1), + storage_root: Hash::repeat_byte(2), + }, + base_constraints, + Vec::new(), + 4, + vec![], + ) + .unwrap(); + let chain = FragmentChain::populate(scope, storage); + assert_eq!(chain.best_chain_len(), 0); + assert_eq!(chain.unconnected_len(), 0); +} -// let (pvd_a, candidate_a) = make_committed_candidate( -// para_id, -// relay_parent_a, -// 0, -// vec![0x0a].into(), -// vec![0x0b].into(), -// 0, -// ); -// let candidate_a_hash = candidate_a.hash(); +#[test] +fn populate_with_empty_best_chain() { + let mut storage = CandidateStorage::default(); -// let (pvd_b, candidate_b) = make_committed_candidate( -// para_id, -// relay_parent_b, -// 1, -// 
vec![0x0b].into(), -// vec![0x0c].into(), -// 1, -// ); -// let candidate_b_hash = candidate_b.hash(); + let para_id = ParaId::from(5u32); + let relay_parent_x = Hash::repeat_byte(1); + let relay_parent_y = Hash::repeat_byte(2); + let relay_parent_z = Hash::repeat_byte(3); + let relay_parent_x_info = + RelayChainBlockInfo { number: 0, hash: relay_parent_x, storage_root: Hash::zero() }; + let relay_parent_y_info = + RelayChainBlockInfo { number: 1, hash: relay_parent_y, storage_root: Hash::zero() }; + let relay_parent_z_info = + RelayChainBlockInfo { number: 2, hash: relay_parent_z, storage_root: Hash::zero() }; -// let (pvd_c, candidate_c) = make_committed_candidate( -// para_id, -// // Use the same relay parent number as B to test that it doesn't need to change between -// // candidates. -// relay_parent_b, -// 1, -// vec![0x0c].into(), -// vec![0x0d].into(), -// 1, -// ); -// let candidate_c_hash = candidate_c.hash(); + let ancestors = vec![ + // These need to be ordered in reverse. + relay_parent_y_info.clone(), + relay_parent_x_info.clone(), + ]; -// // Candidate D will never be added to the chain. -// let (pvd_d, candidate_d) = make_committed_candidate( -// para_id, -// relay_parent_d, -// 2, -// vec![0x0e].into(), -// vec![0x0f].into(), -// 1, -// ); + let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + let pending_availability = Vec::new(); -// let relay_parent_a_info = RelayChainBlockInfo { -// number: pvd_a.relay_parent_number, -// hash: relay_parent_a, -// storage_root: pvd_a.relay_parent_storage_root, -// }; -// let relay_parent_b_info = RelayChainBlockInfo { -// number: pvd_b.relay_parent_number, -// hash: relay_parent_b, -// storage_root: pvd_b.relay_parent_storage_root, -// }; -// let relay_parent_d_info = RelayChainBlockInfo { -// number: pvd_d.relay_parent_number, -// hash: relay_parent_d, -// storage_root: pvd_d.relay_parent_storage_root, -// }; + // Candidates A -> B -> C. 
They are all backed + let (pvd_a, candidate_a) = make_committed_candidate( + para_id, + relay_parent_x_info.hash, + relay_parent_x_info.number, + vec![0x0a].into(), + vec![0x0b].into(), + relay_parent_x_info.number, + ); + let candidate_a_hash = candidate_a.hash(); + storage + .add_candidate_entry( + CandidateEntry::new(candidate_a_hash, candidate_a, pvd_a, CandidateState::Backed) + .unwrap(), + ) + .unwrap(); + let (pvd_b, candidate_b) = make_committed_candidate( + para_id, + relay_parent_y_info.hash, + relay_parent_y_info.number, + vec![0x0b].into(), + vec![0x0c].into(), + relay_parent_y_info.number, + ); + let candidate_b_hash = candidate_b.hash(); + storage + .add_candidate_entry( + CandidateEntry::new(candidate_b_hash, candidate_b, pvd_b, CandidateState::Backed) + .unwrap(), + ) + .unwrap(); + let (pvd_c, candidate_c) = make_committed_candidate( + para_id, + relay_parent_z_info.hash, + relay_parent_z_info.number, + vec![0x0c].into(), + vec![0x0d].into(), + relay_parent_z_info.number, + ); + let candidate_c_hash = candidate_c.hash(); + storage + .add_candidate_entry( + CandidateEntry::new(candidate_c_hash, candidate_c, pvd_c, CandidateState::Backed) + .unwrap(), + ) + .unwrap(); -// let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); -// let pending_availability = Vec::new(); + // Candidate A doesn't adhere to the base constraints. 
+ { + for wrong_constraints in [ + // Different required parent + make_constraints( + relay_parent_x_info.number, + vec![relay_parent_x_info.number], + vec![0x0e].into(), + ), + // Min relay parent number is wrong + make_constraints(relay_parent_y_info.number, vec![0], vec![0x0a].into()), + ] { + let scope = Scope::with_ancestors( + relay_parent_z_info.clone(), + wrong_constraints.clone(), + pending_availability.clone(), + 4, + ancestors.clone(), + ) + .unwrap(); + let chain = FragmentChain::populate(scope, storage.clone()); + + assert!(chain.best_chain_vec().is_empty()); + + // If the min relay parent number is wrong, candidate A can never become valid. + // Otherwise, if only the required parent doesn't match, candidate A is still a + // potential candidate. + if wrong_constraints.min_relay_parent_number == relay_parent_y_info.number { + // If A is not a potential candidate, neither are his descendants. + assert_eq!(chain.unconnected_len(), 0); + } else { + assert_eq!( + chain.unconnected().map(|c| c.candidate_hash).collect::>(), + [candidate_a_hash, candidate_b_hash, candidate_c_hash].into_iter().collect() + ); + } + } + } -// let ancestors = vec![ -// // These need to be ordered in reverse. -// relay_parent_b_info.clone(), -// relay_parent_a_info.clone(), -// ]; + // Depth is 0, only allows one candidate, but the others will be kept as potential. + let scope = Scope::with_ancestors( + relay_parent_z_info.clone(), + base_constraints.clone(), + pending_availability.clone(), + 0, + ancestors.clone(), + ) + .unwrap(); + let chain = FragmentChain::populate(scope, storage.clone()); + assert_eq!(chain.best_chain_vec(), vec![candidate_a_hash]); + assert_eq!( + chain.unconnected().map(|c| c.candidate_hash).collect::>(), + [candidate_b_hash, candidate_c_hash].into_iter().collect() + ); -// // Already had A and C in the storage. Introduce B, which should add both B and C to the chain -// // now. 
-// { -// let mut storage = CandidateStorage::default(); -// storage -// .add_candidate(candidate_a.clone(), pvd_a.clone(), CandidateState::Seconded) -// .unwrap(); -// storage -// .add_candidate(candidate_c.clone(), pvd_c.clone(), CandidateState::Seconded) -// .unwrap(); -// storage -// .add_candidate(candidate_d.clone(), pvd_d.clone(), CandidateState::Seconded) -// .unwrap(); + // depth is 1, allows two candidates + let scope = Scope::with_ancestors( + relay_parent_z_info.clone(), + base_constraints.clone(), + pending_availability.clone(), + 1, + ancestors.clone(), + ) + .unwrap(); + let chain = FragmentChain::populate(scope, storage.clone()); + assert_eq!(chain.best_chain_vec(), vec![candidate_a_hash, candidate_b_hash]); + assert_eq!( + chain.unconnected().map(|c| c.candidate_hash).collect::>(), + [candidate_c_hash].into_iter().collect() + ); -// let scope = Scope::with_ancestors( -// relay_parent_d_info.clone(), -// base_constraints.clone(), -// pending_availability.clone(), -// 4, -// ancestors.clone(), -// ) -// .unwrap(); -// let mut chain = FragmentChain::populate(scope, &storage); -// assert_eq!(chain.to_vec(), vec![candidate_a_hash]); + // depth is larger than 2, allows all three candidates + for depth in 2..6 { + let scope = Scope::with_ancestors( + relay_parent_z_info.clone(), + base_constraints.clone(), + pending_availability.clone(), + depth, + ancestors.clone(), + ) + .unwrap(); + let chain = FragmentChain::populate(scope, storage.clone()); + assert_eq!( + chain.best_chain_vec(), + vec![candidate_a_hash, candidate_b_hash, candidate_c_hash] + ); + assert_eq!(chain.unconnected_len(), 0); + } -// storage -// .add_candidate(candidate_b.clone(), pvd_b.clone(), CandidateState::Seconded) -// .unwrap(); -// chain.extend_from_storage(&storage); -// assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); -// } + // Candidate A has relay parent out of scope. 
Candidates B and C will also be deleted since they + // form a chain with A. + let ancestors_without_x = vec![relay_parent_y_info.clone()]; + let scope = Scope::with_ancestors( + relay_parent_z_info.clone(), + base_constraints.clone(), + pending_availability.clone(), + 4, + ancestors_without_x, + ) + .unwrap(); + let chain = FragmentChain::populate(scope, storage.clone()); + assert!(chain.best_chain_vec().is_empty()); + assert_eq!(chain.unconnected_len(), 0); -// // Already had A and B in the chain. Introduce C. -// { -// let mut storage = CandidateStorage::default(); -// storage -// .add_candidate(candidate_a.clone(), pvd_a.clone(), CandidateState::Seconded) -// .unwrap(); -// storage -// .add_candidate(candidate_b.clone(), pvd_b.clone(), CandidateState::Seconded) -// .unwrap(); -// storage -// .add_candidate(candidate_d.clone(), pvd_d.clone(), CandidateState::Seconded) -// .unwrap(); + // Candidates A and B have relay parents out of scope. Candidate C will also be deleted since it + // forms a chain with A and B. + let scope = Scope::with_ancestors( + relay_parent_z_info.clone(), + base_constraints.clone(), + pending_availability.clone(), + 4, + vec![], + ) + .unwrap(); + let chain = FragmentChain::populate(scope, storage.clone()); + assert!(chain.best_chain_vec().is_empty()); + assert_eq!(chain.unconnected_len(), 0); + + // Candidate C has the same relay parent as candidate A's parent. 
Relay parent not allowed + // to move backwards + let mut modified_storage = storage.clone(); + modified_storage.remove_candidate(&candidate_c_hash); + let (wrong_pvd_c, wrong_candidate_c) = make_committed_candidate( + para_id, + relay_parent_x_info.hash, + relay_parent_x_info.number, + vec![0x0c].into(), + vec![0x0d].into(), + 0, + ); + modified_storage + .add_candidate_entry( + CandidateEntry::new( + wrong_candidate_c.hash(), + wrong_candidate_c, + wrong_pvd_c, + CandidateState::Backed, + ) + .unwrap(), + ) + .unwrap(); + let scope = Scope::with_ancestors( + relay_parent_z_info.clone(), + base_constraints.clone(), + pending_availability.clone(), + 4, + ancestors.clone(), + ) + .unwrap(); -// let scope = Scope::with_ancestors( -// relay_parent_d_info.clone(), -// base_constraints.clone(), -// pending_availability.clone(), -// 4, -// ancestors.clone(), -// ) -// .unwrap(); -// let mut chain = FragmentChain::populate(scope, &storage); -// assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); + let chain = FragmentChain::populate(scope, modified_storage); + assert_eq!(chain.best_chain_vec(), vec![candidate_a_hash, candidate_b_hash]); + assert_eq!(chain.unconnected_len(), 0); + + // Parachain cycle is not allowed. Make C have the same parent as A. 
+ let mut modified_storage = storage.clone(); + modified_storage.remove_candidate(&candidate_c_hash); + let (wrong_pvd_c, wrong_candidate_c) = make_committed_candidate( + para_id, + relay_parent_z_info.hash, + relay_parent_z_info.number, + vec![0x0c].into(), + vec![0x0a].into(), + relay_parent_z_info.number, + ); + modified_storage + .add_candidate_entry( + CandidateEntry::new( + wrong_candidate_c.hash(), + wrong_candidate_c, + wrong_pvd_c, + CandidateState::Backed, + ) + .unwrap(), + ) + .unwrap(); + let scope = Scope::with_ancestors( + relay_parent_z_info.clone(), + base_constraints.clone(), + pending_availability.clone(), + 4, + ancestors.clone(), + ) + .unwrap(); -// storage -// .add_candidate(candidate_c.clone(), pvd_c.clone(), CandidateState::Seconded) -// .unwrap(); -// chain.extend_from_storage(&storage); -// assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); -// } -// } + let chain = FragmentChain::populate(scope, modified_storage); + assert_eq!(chain.best_chain_vec(), vec![candidate_a_hash, candidate_b_hash]); + assert_eq!(chain.unconnected_len(), 0); + + // More complex case: + // max_depth is 2 (a chain of max depth 3). + // A -> B -> C are the best backable chain. + // D is backed but would exceed the max depth. + // F is unconnected and seconded. + // A1 has same parent as A, is backed but has a higher candidate hash. It'll therefore be + // deleted. + // A1 has underneath a subtree that will all need to be trimmed. A1 -> B1. B1 -> C1 + // and B1 -> C2. (C1 is backed). C1 -> D1 and C1 -> D2. + // A2 is seconded but is kept because it has a lower candidate hash than A. + // A2 points to B2, which is backed. 
+} #[test] fn test_find_ancestor_path_and_find_backable_chain_empty_best_chain() { From a56f371e1a48c5d07ab4f76d48067e015c237865 Mon Sep 17 00:00:00 2001 From: alindima Date: Wed, 24 Jul 2024 16:20:47 +0300 Subject: [PATCH 18/56] more unit tests --- .../core/prospective-parachains/Cargo.toml | 1 - .../src/fragment_chain/mod.rs | 21 +- .../src/fragment_chain/tests.rs | 361 +++++++++++++++++- 3 files changed, 368 insertions(+), 15 deletions(-) diff --git a/polkadot/node/core/prospective-parachains/Cargo.toml b/polkadot/node/core/prospective-parachains/Cargo.toml index 97da5a1e94a0..29d805510923 100644 --- a/polkadot/node/core/prospective-parachains/Cargo.toml +++ b/polkadot/node/core/prospective-parachains/Cargo.toml @@ -28,7 +28,6 @@ polkadot-node-subsystem-test-helpers = { workspace = true } polkadot-node-subsystem-types = { workspace = true, default-features = true } polkadot-primitives-test-helpers = { workspace = true } sp-core = { workspace = true, default-features = true } -sc-keystore = { workspace = true, default-features = true } sp-application-crypto = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs index 2a876613f902..08b8d1d6ad06 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs @@ -958,7 +958,7 @@ impl FragmentChain { } // If the candidate is backed and in the current chain, accept only a candidate - // according to the fork selection rul. + // according to the fork selection rule. 
if fork_selection_rule(other_candidate, &candidate.candidate_hash()) == Ordering::Less { return Err(Error::ForkChoiceRule(*other_candidate)) } @@ -966,7 +966,7 @@ impl FragmentChain { // Try seeing if the parent candidate is in the current chain or if it is the latest // included candidate. If so, get the constraints the candidate must satisfy. - let constraints = + let (constraints, maybe_min_relay_parent_number) = if let Some(parent_candidate) = self.best_chain.by_output_head.get(&parent_head_hash) { let Some(parent_candidate) = self.best_chain.chain.iter().find(|c| &c.candidate_hash == parent_candidate) @@ -975,13 +975,16 @@ impl FragmentChain { return Err(Error::ParentCandidateNotFound) }; - self.scope - .base_constraints - .apply_modifications(&parent_candidate.cumulative_modifications) - .map_err(Error::ComputeConstraints)? + ( + self.scope + .base_constraints + .apply_modifications(&parent_candidate.cumulative_modifications) + .map_err(Error::ComputeConstraints)?, + self.scope.ancestor(&parent_candidate.relay_parent()).map(|rp| rp.number), + ) } else if self.scope.base_constraints.required_parent.hash() == parent_head_hash { // It builds on the latest included candidate. - self.scope.base_constraints.clone() + (self.scope.base_constraints.clone(), None) } else { // If the parent is not yet part of the chain, there's nothing else we can check for // now. 
@@ -1013,8 +1016,8 @@ impl FragmentChain { return Err(Error::RelayParentMovedBackwards) } - if let Some(earliest_rp) = self.earliest_relay_parent() { - if relay_parent.number < earliest_rp.number { + if let Some(earliest_rp) = maybe_min_relay_parent_number { + if relay_parent.number < earliest_rp { return Err(Error::RelayParentMovedBackwards) } } diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs index 701743ca5768..4a236dd4c924 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs @@ -559,8 +559,8 @@ fn populate_with_empty_best_chain() { assert_eq!(chain.unconnected_len(), 0); } - // Candidate A has relay parent out of scope. Candidates B and C will also be deleted since they - // form a chain with A. + // Candidate A has relay parent out of scope. Candidates B and C will also be deleted since + // they form a chain with A. let ancestors_without_x = vec![relay_parent_y_info.clone()]; let scope = Scope::with_ancestors( relay_parent_z_info.clone(), @@ -574,8 +574,8 @@ fn populate_with_empty_best_chain() { assert!(chain.best_chain_vec().is_empty()); assert_eq!(chain.unconnected_len(), 0); - // Candidates A and B have relay parents out of scope. Candidate C will also be deleted since it - // forms a chain with A and B. + // Candidates A and B have relay parents out of scope. Candidate C will also be deleted since + // it forms a chain with A and B. let scope = Scope::with_ancestors( relay_parent_z_info.clone(), base_constraints.clone(), @@ -624,6 +624,87 @@ fn populate_with_empty_best_chain() { assert_eq!(chain.best_chain_vec(), vec![candidate_a_hash, candidate_b_hash]); assert_eq!(chain.unconnected_len(), 0); + // Candidate C is an unconnected candidate. + // C's relay parent is allowed to move backwards from B's relay parent. 
+	let mut modified_storage = storage.clone();
+	modified_storage.remove_candidate(&candidate_c_hash);
+	let (unconnected_pvd_c, unconnected_candidate_c) = make_committed_candidate(
+		para_id,
+		relay_parent_x_info.hash,
+		relay_parent_x_info.number,
+		vec![0x0d].into(),
+		vec![0x0e].into(),
+		0,
+	);
+	let unconnected_candidate_c_hash = unconnected_candidate_c.hash();
+	modified_storage
+		.add_candidate_entry(
+			CandidateEntry::new(
+				unconnected_candidate_c_hash,
+				unconnected_candidate_c,
+				unconnected_pvd_c,
+				CandidateState::Backed,
+			)
+			.unwrap(),
+		)
+		.unwrap();
+	let scope = Scope::with_ancestors(
+		relay_parent_z_info.clone(),
+		base_constraints.clone(),
+		pending_availability.clone(),
+		4,
+		ancestors.clone(),
+	)
+	.unwrap();
+
+	let chain = FragmentChain::populate(scope, modified_storage.clone());
+	assert_eq!(chain.best_chain_vec(), vec![candidate_a_hash, candidate_b_hash]);
+	assert_eq!(
+		chain.unconnected().map(|c| c.candidate_hash).collect::<HashSet<_>>(),
+		[unconnected_candidate_c_hash].into_iter().collect()
+	);
+
+	// Candidate A is a pending availability candidate and Candidate C is an unconnected candidate,
+	// C's relay parent is not allowed to move backwards from A's relay parent.
+ + modified_storage.remove_candidate(&candidate_a_hash); + let (modified_pvd_a, modified_candidate_a) = make_committed_candidate( + para_id, + relay_parent_y_info.hash, + relay_parent_y_info.number, + vec![0x0a].into(), + vec![0x0b].into(), + relay_parent_y_info.number, + ); + let modified_candidate_a_hash = modified_candidate_a.hash(); + modified_storage + .add_candidate_entry( + CandidateEntry::new( + modified_candidate_a_hash, + modified_candidate_a, + modified_pvd_a, + CandidateState::Backed, + ) + .unwrap(), + ) + .unwrap(); + + let scope = Scope::with_ancestors( + relay_parent_z_info.clone(), + base_constraints.clone(), + vec![PendingAvailability { + candidate_hash: modified_candidate_a_hash, + relay_parent: relay_parent_y_info.clone(), + }], + 4, + ancestors.clone(), + ) + .unwrap(); + + let chain = FragmentChain::populate(scope, modified_storage.clone()); + assert_eq!(chain.best_chain_vec(), vec![modified_candidate_a_hash, candidate_b_hash]); + assert_eq!(chain.unconnected_len(), 0); + // Parachain cycle is not allowed. Make C have the same parent as A. 
let mut modified_storage = storage.clone(); modified_storage.remove_candidate(&candidate_c_hash); @@ -659,6 +740,107 @@ fn populate_with_empty_best_chain() { assert_eq!(chain.best_chain_vec(), vec![candidate_a_hash, candidate_b_hash]); assert_eq!(chain.unconnected_len(), 0); + // Test with candidates pending availability + { + // Valid options + for pending in [ + vec![PendingAvailability { + candidate_hash: candidate_a_hash, + relay_parent: relay_parent_x_info.clone(), + }], + vec![ + PendingAvailability { + candidate_hash: candidate_a_hash, + relay_parent: relay_parent_x_info.clone(), + }, + PendingAvailability { + candidate_hash: candidate_b_hash, + relay_parent: relay_parent_y_info.clone(), + }, + ], + vec![ + PendingAvailability { + candidate_hash: candidate_a_hash, + relay_parent: relay_parent_x_info.clone(), + }, + PendingAvailability { + candidate_hash: candidate_b_hash, + relay_parent: relay_parent_y_info.clone(), + }, + PendingAvailability { + candidate_hash: candidate_c_hash, + relay_parent: relay_parent_z_info.clone(), + }, + ], + ] { + let scope = Scope::with_ancestors( + relay_parent_z_info.clone(), + base_constraints.clone(), + pending, + 3, + ancestors.clone(), + ) + .unwrap(); + let chain = FragmentChain::populate(scope, storage.clone()); + assert_eq!( + chain.best_chain_vec(), + vec![candidate_a_hash, candidate_b_hash, candidate_c_hash] + ); + assert_eq!(chain.unconnected_len(), 0); + } + + // Relay parents of pending availability candidates can be out of scope + // Relay parent of candidate A is out of scope. 
+ let ancestors_without_x = vec![relay_parent_y_info.clone()]; + let scope = Scope::with_ancestors( + relay_parent_z_info.clone(), + base_constraints.clone(), + vec![PendingAvailability { + candidate_hash: candidate_a_hash, + relay_parent: relay_parent_x_info.clone(), + }], + 4, + ancestors_without_x, + ) + .unwrap(); + let chain = FragmentChain::populate(scope, storage.clone()); + assert_eq!( + chain.best_chain_vec(), + vec![candidate_a_hash, candidate_b_hash, candidate_c_hash] + ); + assert_eq!(chain.unconnected_len(), 0); + + // Even relay parents of pending availability candidates which are out of scope cannot + // move backwards. + let scope = Scope::with_ancestors( + relay_parent_z_info.clone(), + base_constraints.clone(), + vec![ + PendingAvailability { + candidate_hash: candidate_a_hash, + relay_parent: RelayChainBlockInfo { + hash: relay_parent_x_info.hash, + number: 1, + storage_root: relay_parent_x_info.storage_root, + }, + }, + PendingAvailability { + candidate_hash: candidate_b_hash, + relay_parent: RelayChainBlockInfo { + hash: relay_parent_y_info.hash, + number: 0, + storage_root: relay_parent_y_info.storage_root, + }, + }, + ], + 4, + vec![], + ) + .unwrap(); + let chain = FragmentChain::populate(scope, storage.clone()); + assert!(chain.best_chain_vec().is_empty()); + } + // More complex case: // max_depth is 2 (a chain of max depth 3). // A -> B -> C are the best backable chain. @@ -667,9 +849,178 @@ fn populate_with_empty_best_chain() { // A1 has same parent as A, is backed but has a higher candidate hash. It'll therefore be // deleted. // A1 has underneath a subtree that will all need to be trimmed. A1 -> B1. B1 -> C1 - // and B1 -> C2. (C1 is backed). C1 -> D1 and C1 -> D2. + // and B1 -> C2. (C1 is backed). // A2 is seconded but is kept because it has a lower candidate hash than A. // A2 points to B2, which is backed. + // + // Check that D, F, A2 and B2 are kept as unconnected potential candidates. 
+ + let scope = Scope::with_ancestors( + relay_parent_z_info.clone(), + base_constraints.clone(), + pending_availability.clone(), + 2, + ancestors.clone(), + ) + .unwrap(); + + // Candidate D + let (pvd_d, candidate_d) = make_committed_candidate( + para_id, + relay_parent_z_info.hash, + relay_parent_z_info.number, + vec![0x0d].into(), + vec![0x0e].into(), + relay_parent_z_info.number, + ); + let candidate_d_hash = candidate_d.hash(); + storage + .add_candidate_entry( + CandidateEntry::new(candidate_d_hash, candidate_d, pvd_d, CandidateState::Backed) + .unwrap(), + ) + .unwrap(); + + // Candidate F + let (pvd_f, candidate_f) = make_committed_candidate( + para_id, + relay_parent_z_info.hash, + relay_parent_z_info.number, + vec![0x0f].into(), + vec![0xf1].into(), + relay_parent_z_info.number, + ); + let candidate_f_hash = candidate_f.hash(); + storage + .add_candidate_entry( + CandidateEntry::new(candidate_f_hash, candidate_f, pvd_f, CandidateState::Seconded) + .unwrap(), + ) + .unwrap(); + + // Candidate A1 + let (pvd_a1, candidate_a1) = make_committed_candidate( + para_id, + relay_parent_x_info.hash, + relay_parent_x_info.number, + vec![0x0a].into(), + vec![0xb1].into(), + relay_parent_x_info.number, + ); + let candidate_a1_hash = candidate_a1.hash(); + // Candidate A1 is created so that its hash is larger than the candidate A hash. + assert_eq!(fork_selection_rule(&candidate_a_hash, &candidate_a1_hash), Ordering::Less); + + storage + .add_candidate_entry( + CandidateEntry::new(candidate_a1_hash, candidate_a1, pvd_a1, CandidateState::Backed) + .unwrap(), + ) + .unwrap(); + + // Candidate B1. 
+	let (pvd_b1, candidate_b1) = make_committed_candidate(
+		para_id,
+		relay_parent_x_info.hash,
+		relay_parent_x_info.number,
+		vec![0xb1].into(),
+		vec![0xc1].into(),
+		relay_parent_x_info.number,
+	);
+	let candidate_b1_hash = candidate_b1.hash();
+
+	storage
+		.add_candidate_entry(
+			CandidateEntry::new(candidate_b1_hash, candidate_b1, pvd_b1, CandidateState::Seconded)
+				.unwrap(),
+		)
+		.unwrap();
+
+	// Candidate C1.
+	let (pvd_c1, candidate_c1) = make_committed_candidate(
+		para_id,
+		relay_parent_x_info.hash,
+		relay_parent_x_info.number,
+		vec![0xc1].into(),
+		vec![0xd1].into(),
+		relay_parent_x_info.number,
+	);
+	let candidate_c1_hash = candidate_c1.hash();
+
+	storage
+		.add_candidate_entry(
+			CandidateEntry::new(candidate_c1_hash, candidate_c1, pvd_c1, CandidateState::Backed)
+				.unwrap(),
+		)
+		.unwrap();
+
+	// Candidate C2.
+	let (pvd_c2, candidate_c2) = make_committed_candidate(
+		para_id,
+		relay_parent_x_info.hash,
+		relay_parent_x_info.number,
+		vec![0xc1].into(),
+		vec![0xd2].into(),
+		relay_parent_x_info.number,
+	);
+	let candidate_c2_hash = candidate_c2.hash();
+
+	storage
+		.add_candidate_entry(
+			CandidateEntry::new(candidate_c2_hash, candidate_c2, pvd_c2, CandidateState::Seconded)
+				.unwrap(),
+		)
+		.unwrap();
+
+	// Candidate A2.
+	let (pvd_a2, candidate_a2) = make_committed_candidate(
+		para_id,
+		relay_parent_x_info.hash,
+		relay_parent_x_info.number,
+		vec![0x0a].into(),
+		vec![0xb3].into(),
+		relay_parent_x_info.number,
+	);
+	let candidate_a2_hash = candidate_a2.hash();
+	// Candidate A2 is created so that its hash is lower than the candidate A hash.
+	assert_eq!(fork_selection_rule(&candidate_a2_hash, &candidate_a_hash), Ordering::Less);
+
+	storage
+		.add_candidate_entry(
+			CandidateEntry::new(candidate_a2_hash, candidate_a2, pvd_a2, CandidateState::Seconded)
+				.unwrap(),
+		)
+		.unwrap();
+
+	// Candidate B2.
+ let (pvd_b2, candidate_b2) = make_committed_candidate( + para_id, + relay_parent_y_info.hash, + relay_parent_y_info.number, + vec![0xb3].into(), + vec![0xb4].into(), + relay_parent_y_info.number, + ); + let candidate_b2_hash = candidate_b2.hash(); + + storage + .add_candidate_entry( + CandidateEntry::new(candidate_b2_hash, candidate_b2, pvd_b2, CandidateState::Backed) + .unwrap(), + ) + .unwrap(); + + let chain = FragmentChain::populate(scope, storage.clone()); + assert_eq!(chain.best_chain_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); + assert_eq!( + chain.unconnected().map(|c| c.candidate_hash).collect::>(), + [candidate_d_hash, candidate_f_hash, candidate_a2_hash, candidate_b2_hash] + .into_iter() + .collect() + ); + + // TODO: add test for ForkWithCandidatePendingAvailability + // TODO: add test for the complete candidate checks } #[test] From cb91a201954baeb5b9c960112b20c1653ce6b855 Mon Sep 17 00:00:00 2001 From: alindima Date: Wed, 24 Jul 2024 16:22:52 +0300 Subject: [PATCH 19/56] typos --- .../prospective-parachains/src/fragment_chain/mod.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs index 08b8d1d6ad06..7e40179ca0a3 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs @@ -37,18 +37,18 @@ //! The unconnected storage keeps a record of seconded/backable candidates that may be //! added to the best chain in the future. //! Once a candidate is seconded, it becomes part of this unconnected storage. -//! Only after it is backed it may be added to the best chain (but not neccessarily). It's only +//! Only after it is backed it may be added to the best chain (but not necessarily). It's only //! 
added if it builds on the latest candidate in the chain and if there isn't a better backable //! candidate according to the fork selection rule. //! //! An important thing to note is that the candidates present in the unconnected storage may have //! any/no relationship between them. In other words, they may form N trees and may even form //! cycles. This is needed so that we may begin validating candidates for which we don't yet know -//! their parent (so we may parallelise the backing process across different groups for elastic +//! their parent (so we may parallelize the backing process across different groups for elastic //! scaling) and so that we accept parachain forks. //! //! We accept parachain forks only until reaching the backing quorum. After that, we assume all -//! validators pick the same fork accroding to the fork selection rule. If we decided to not accept +//! validators pick the same fork according to the fork selection rule. If we decided to not accept //! parachain forks, candidates could end up getting only half of the backing votes or even less //! (for forks of larger arity). This would affect the validator rewards. Still, we don't guarantee //! that a fork-producing parachains will be able to fully use elastic scaling. @@ -837,7 +837,7 @@ impl FragmentChain { } /// Checks if this candidate could be added in the future to this chain. - /// This will return `Error::CandidateAlreadyKnown` if the candidate is alrady in the chain or + /// This will return `Error::CandidateAlreadyKnown` if the candidate is already in the chain or /// the unconnected candidate storage. It will return /// `Error::CandidateAlreadyPendingAvailability` if the candidate is already pending /// availability. @@ -1075,7 +1075,7 @@ impl FragmentChain { // Populate the fragment chain with candidates from the supplied `CandidateStorage`. // Can be called by the constructor or when backing a new candidate. 
- // When this is called, it may cause a the previous chain to be completely erased or it may add + // When this is called, it may cause the previous chain to be completely erased or it may add // more than one candidate. fn populate_chain(&mut self, storage: &mut CandidateStorage) { let mut cumulative_modifications = From 1f1fde5cce41b28785db99d22c1686a898aa24df Mon Sep 17 00:00:00 2001 From: alindima Date: Wed, 24 Jul 2024 17:47:37 +0300 Subject: [PATCH 20/56] unit test for ForkWithCandidatePendingAvailability --- .../src/fragment_chain/tests.rs | 66 +++++++++++++++---- 1 file changed, 54 insertions(+), 12 deletions(-) diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs index 4a236dd4c924..8ba68945f436 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs @@ -588,6 +588,41 @@ fn populate_with_empty_best_chain() { assert!(chain.best_chain_vec().is_empty()); assert_eq!(chain.unconnected_len(), 0); + // Parachain cycle is not allowed. Make C have the same parent as A. 
+ let mut modified_storage = storage.clone(); + modified_storage.remove_candidate(&candidate_c_hash); + let (wrong_pvd_c, wrong_candidate_c) = make_committed_candidate( + para_id, + relay_parent_z_info.hash, + relay_parent_z_info.number, + vec![0x0c].into(), + vec![0x0a].into(), + relay_parent_z_info.number, + ); + modified_storage + .add_candidate_entry( + CandidateEntry::new( + wrong_candidate_c.hash(), + wrong_candidate_c, + wrong_pvd_c, + CandidateState::Backed, + ) + .unwrap(), + ) + .unwrap(); + let scope = Scope::with_ancestors( + relay_parent_z_info.clone(), + base_constraints.clone(), + pending_availability.clone(), + 4, + ancestors.clone(), + ) + .unwrap(); + + let chain = FragmentChain::populate(scope, modified_storage); + assert_eq!(chain.best_chain_vec(), vec![candidate_a_hash, candidate_b_hash]); + assert_eq!(chain.unconnected_len(), 0); + // Candidate C has the same relay parent as candidate A's parent. Relay parent not allowed // to move backwards let mut modified_storage = storage.clone(); @@ -705,21 +740,20 @@ fn populate_with_empty_best_chain() { assert_eq!(chain.best_chain_vec(), vec![modified_candidate_a_hash, candidate_b_hash]); assert_eq!(chain.unconnected_len(), 0); - // Parachain cycle is not allowed. Make C have the same parent as A. 
- let mut modified_storage = storage.clone(); - modified_storage.remove_candidate(&candidate_c_hash); + // Not allowed to fork from a candidate pending availability let (wrong_pvd_c, wrong_candidate_c) = make_committed_candidate( para_id, - relay_parent_z_info.hash, - relay_parent_z_info.number, - vec![0x0c].into(), + relay_parent_y_info.hash, + relay_parent_y_info.number, vec![0x0a].into(), - relay_parent_z_info.number, + vec![0x0b2].into(), + 0, ); + let wrong_candidate_c_hash = wrong_candidate_c.hash(); modified_storage .add_candidate_entry( CandidateEntry::new( - wrong_candidate_c.hash(), + wrong_candidate_c_hash, wrong_candidate_c, wrong_pvd_c, CandidateState::Backed, @@ -727,17 +761,26 @@ fn populate_with_empty_best_chain() { .unwrap(), ) .unwrap(); + + assert_eq!( + fork_selection_rule(&wrong_candidate_c_hash, &modified_candidate_a_hash), + Ordering::Less + ); + let scope = Scope::with_ancestors( relay_parent_z_info.clone(), base_constraints.clone(), - pending_availability.clone(), + vec![PendingAvailability { + candidate_hash: modified_candidate_a_hash, + relay_parent: relay_parent_y_info.clone(), + }], 4, ancestors.clone(), ) .unwrap(); - let chain = FragmentChain::populate(scope, modified_storage); - assert_eq!(chain.best_chain_vec(), vec![candidate_a_hash, candidate_b_hash]); + let chain = FragmentChain::populate(scope, modified_storage.clone()); + assert_eq!(chain.best_chain_vec(), vec![modified_candidate_a_hash, candidate_b_hash]); assert_eq!(chain.unconnected_len(), 0); // Test with candidates pending availability @@ -1019,7 +1062,6 @@ fn populate_with_empty_best_chain() { .collect() ); - // TODO: add test for ForkWithCandidatePendingAvailability // TODO: add test for the complete candidate checks } From 1a07e5647c2992447e54f954293f800c919a6f98 Mon Sep 17 00:00:00 2001 From: alindima Date: Wed, 24 Jul 2024 17:50:56 +0300 Subject: [PATCH 21/56] update lock --- Cargo.lock | 1 - 1 file changed, 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 
d085f2d90ff7..798f515fc056 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13428,7 +13428,6 @@ dependencies = [ "polkadot-primitives", "polkadot-primitives-test-helpers", "rstest", - "sc-keystore", "sp-application-crypto", "sp-core", "sp-keyring", From eae93459c1e2607aa7856fa0654ada706ff8d895 Mon Sep 17 00:00:00 2001 From: alindima Date: Wed, 24 Jul 2024 17:51:42 +0300 Subject: [PATCH 22/56] fmt --- polkadot/node/core/prospective-parachains/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index 4def739b63bf..cddb3c44a1a3 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -219,7 +219,7 @@ async fn handle_active_leaves_update( let requested_ancestry_len = if allowed_ancestry_len == 0 { 1 - // We should try requesting at least one, so that we can know the previous leaf. + // We should try requesting at least one, so that we can know the previous leaf. } else { allowed_ancestry_len }; From 27dd91d650d3f925f4eb301ea4251ad0e5d83cb3 Mon Sep 17 00:00:00 2001 From: alindima Date: Thu, 25 Jul 2024 12:10:38 +0300 Subject: [PATCH 23/56] don't keep candidates from previous leaf if they used to be pending availability --- .../core/prospective-parachains/src/lib.rs | 46 +++++++++++-------- 1 file changed, 26 insertions(+), 20 deletions(-) diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index cddb3c44a1a3..02dff4a3e1a9 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -236,17 +236,6 @@ async fn handle_active_leaves_update( let mut fragment_chains = HashMap::new(); for para in scheduled_paras { - // Get the candidate storage of the parent leaf, if present. 
- let prev_candidate_storage = prev_fragment_chains - .map(|chains| { - chains - .fragment_chains - .get(¶) - .map(|chain| chain.as_candidate_storage()) - .unwrap_or_default() - }) - .unwrap_or_default(); - // Find constraints and pending availability candidates. let backing_state = fetch_backing_state(ctx, hash, para).await?; let Some((constraints, pending_availability)) = backing_state else { @@ -322,6 +311,32 @@ async fn handle_active_leaves_update( }, }; + // Get the candidate storage of the parent leaf, if present. + let prev_fragment_chain = + prev_fragment_chains.and_then(|chains| chains.fragment_chains.get(¶)); + + if let Some(prev_fragment_chain) = prev_fragment_chain { + // Add old candidates to the new storage only after we added the pending + // availability candidates. The pending candidates have higher priority and can + // conflict with the old candidates. + for candidate in prev_fragment_chain.as_candidate_storage().into_candidates() { + // If they used to be pending availability, don't add them. This is fine + // because: + // - if they still are pending availability, they have already been added to the + // new storage. + // - if they were included, no point in keeping them. + if prev_fragment_chain + .scope() + .get_pending_availability(&candidate.hash()) + .is_none() + { + // We need to swallow any potential errors here, as they can happen under + // normal operation, with candidates becoming out of scope for example. + let _ = new_storage.add_candidate_entry(candidate); + } + } + } + gum::trace!( target: LOG_TARGET, relay_parent = ?hash, @@ -331,15 +346,6 @@ async fn handle_active_leaves_update( "Creating fragment chain" ); - // Add old candidates to the new storage only after we added the pending availability - // candidates. The pending candidates have higher priority and can conflict with the old - // candidates. 
- for candidate in prev_candidate_storage.into_candidates() { - // We need to swallow any potential errors here, as they can happen under normal - // operation, with candidates becoming out of scope for example. - let _ = new_storage.add_candidate_entry(candidate); - } - // Finally, populate the fragment chain. let chain = FragmentChain::populate(scope, new_storage); From b691b86bbb78bb03b9129867fa84aaf8d2a81181 Mon Sep 17 00:00:00 2001 From: alindima Date: Thu, 25 Jul 2024 12:12:13 +0300 Subject: [PATCH 24/56] more tests --- .../src/fragment_chain/mod.rs | 2 +- .../src/fragment_chain/tests.rs | 103 +++++++++++++++++- 2 files changed, 100 insertions(+), 5 deletions(-) diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs index 7e40179ca0a3..b6edbb5c77f2 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs @@ -549,7 +549,7 @@ impl Scope { } /// Whether the candidate in question is one pending availability in this scope. 
- fn get_pending_availability( + pub fn get_pending_availability( &self, candidate_hash: &CandidateHash, ) -> Option<&PendingAvailability> { diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs index 8ba68945f436..550582f73969 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs @@ -400,7 +400,7 @@ fn populate_empty() { } #[test] -fn populate_with_empty_best_chain() { +fn test_populate() { let mut storage = CandidateStorage::default(); let para_id = ParaId::from(5u32); @@ -931,7 +931,7 @@ fn populate_with_empty_best_chain() { relay_parent_z_info.number, vec![0x0f].into(), vec![0xf1].into(), - relay_parent_z_info.number, + 1000, ); let candidate_f_hash = candidate_f.hash(); storage @@ -1053,7 +1053,7 @@ fn populate_with_empty_best_chain() { ) .unwrap(); - let chain = FragmentChain::populate(scope, storage.clone()); + let chain = FragmentChain::populate(scope.clone(), storage.clone()); assert_eq!(chain.best_chain_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); assert_eq!( chain.unconnected().map(|c| c.candidate_hash).collect::>(), @@ -1062,7 +1062,102 @@ fn populate_with_empty_best_chain() { .collect() ); - // TODO: add test for the complete candidate checks + // Candidate F has an invalid hrmp watermark. however, it was not checked beforehand as we don't + // have its parent yet. Add its parent now. This will not impact anything as E is not yet part + // of the best chain. 
+ + let (pvd_e, candidate_e) = make_committed_candidate( + para_id, + relay_parent_z_info.hash, + relay_parent_z_info.number, + vec![0x0e].into(), + vec![0x0f].into(), + relay_parent_z_info.number, + ); + let candidate_e_hash = candidate_e.hash(); + storage + .add_candidate_entry( + CandidateEntry::new(candidate_e_hash, candidate_e, pvd_e, CandidateState::Seconded) + .unwrap(), + ) + .unwrap(); + + let chain = FragmentChain::populate(scope, storage.clone()); + assert_eq!(chain.best_chain_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); + assert_eq!( + chain.unconnected().map(|c| c.candidate_hash).collect::>(), + [ + candidate_d_hash, + candidate_f_hash, + candidate_a2_hash, + candidate_b2_hash, + candidate_e_hash + ] + .into_iter() + .collect() + ); + + // Simulate the fact that candidates A, B, C are now pending availability. + let scope = Scope::with_ancestors( + relay_parent_z_info.clone(), + base_constraints.clone(), + vec![ + PendingAvailability { + candidate_hash: candidate_a_hash, + relay_parent: relay_parent_x_info, + }, + PendingAvailability { + candidate_hash: candidate_b_hash, + relay_parent: relay_parent_y_info, + }, + PendingAvailability { + candidate_hash: candidate_c_hash, + relay_parent: relay_parent_z_info.clone(), + }, + ], + 2, + ancestors.clone(), + ) + .unwrap(); + + // A2 and B2 will now be trimmed + let chain = FragmentChain::populate(scope.clone(), storage.clone()); + assert_eq!(chain.best_chain_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); + assert_eq!( + chain.unconnected().map(|c| c.candidate_hash).collect::>(), + [candidate_d_hash, candidate_f_hash, candidate_e_hash].into_iter().collect() + ); + + // Simulate the fact that candidates A, B and C have been included. + + let mut new_storage = chain.as_candidate_storage(); + // We need to remove the candidates that used to be pending availability. This is what the + // subsystem is doing. 
+ for candidate in scope.pending_availability { + new_storage.remove_candidate(&candidate.candidate_hash); + } + + let base_constraints = make_constraints(0, vec![0], HeadData(vec![0x0d])); + let scope = Scope::with_ancestors( + relay_parent_z_info.clone(), + base_constraints.clone(), + pending_availability.clone(), + 2, + ancestors.clone(), + ) + .unwrap(); + + let chain = FragmentChain::populate(scope, new_storage); + assert_eq!(chain.best_chain_vec(), vec![candidate_d_hash]); + assert_eq!( + chain.unconnected().map(|c| c.candidate_hash).collect::>(), + [candidate_e_hash, candidate_f_hash].into_iter().collect() + ); + + // Mark E as backed. F will be dropped for invalid watermark. No other unconnected candidates. + let chain = chain.candidate_backed(&candidate_e_hash).unwrap(); + assert_eq!(chain.best_chain_vec(), vec![candidate_d_hash, candidate_e_hash]); + assert_eq!(chain.unconnected_len(), 0); } #[test] From b13c5361cdc9c55be1fe41c292eb683a6c50eb54 Mon Sep 17 00:00:00 2001 From: alindima Date: Thu, 25 Jul 2024 13:58:13 +0300 Subject: [PATCH 25/56] unit tests for backable chain --- Cargo.lock | 1 + .../src/fragment_chain/tests.rs | 444 +++++++++--------- 2 files changed, 222 insertions(+), 223 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 798f515fc056..0086d166d16a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13427,6 +13427,7 @@ dependencies = [ "polkadot-node-subsystem-util", "polkadot-primitives", "polkadot-primitives-test-helpers", + "rand", "rstest", "sp-application-crypto", "sp-core", diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs index 550582f73969..31757c6aaaa6 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs @@ -21,6 +21,8 @@ use polkadot_primitives::{ BlockNumber, CandidateCommitments, CandidateDescriptor, HeadData, Id as 
ParaId, }; use polkadot_primitives_test_helpers as test_helpers; +use rand::{seq::SliceRandom, thread_rng}; +use std::ops::Range; fn make_constraints( min_relay_parent_number: BlockNumber, @@ -1187,245 +1189,241 @@ fn test_find_ancestor_path_and_find_backable_chain_empty_best_chain() { assert_eq!(chain.find_backable_chain(ancestors, 2), vec![]); } -// #[test] -// fn test_find_ancestor_path_and_find_backable_to_vec() { -// let para_id = ParaId::from(5u32); -// let relay_parent = Hash::repeat_byte(1); -// let required_parent: HeadData = vec![0xff].into(); -// let max_depth = 5; -// let relay_parent_number = 0; -// let relay_parent_storage_root = Hash::repeat_byte(69); +#[test] +fn test_find_ancestor_path_and_find_backable_chain() { + let para_id = ParaId::from(5u32); + let relay_parent = Hash::repeat_byte(1); + let required_parent: HeadData = vec![0xff].into(); + let max_depth = 5; + let relay_parent_number = 0; + let relay_parent_storage_root = Hash::zero(); -// let mut candidates = vec![]; + let mut candidates = vec![]; -// // Candidate 0 -// candidates.push(make_committed_candidate( -// para_id, -// relay_parent, -// 0, -// required_parent.clone(), -// vec![0].into(), -// 0, -// )); -// // Candidate 1 -// candidates.push(make_committed_candidate( -// para_id, -// relay_parent, -// 0, -// vec![0].into(), -// vec![1].into(), -// 0, -// )); -// // Candidate 2 -// candidates.push(make_committed_candidate( -// para_id, -// relay_parent, -// 0, -// vec![1].into(), -// vec![2].into(), -// 0, -// )); -// // Candidate 3 -// candidates.push(make_committed_candidate( -// para_id, -// relay_parent, -// 0, -// vec![2].into(), -// vec![3].into(), -// 0, -// )); -// // Candidate 4 -// candidates.push(make_committed_candidate( -// para_id, -// relay_parent, -// 0, -// vec![3].into(), -// vec![4].into(), -// 0, -// )); -// // Candidate 5 -// candidates.push(make_committed_candidate( -// para_id, -// relay_parent, -// 0, -// vec![4].into(), -// vec![5].into(), -// 0, -// )); + // 
Candidate 0 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + required_parent.clone(), + vec![0].into(), + 0, + )); + // Candidate 1 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + vec![0].into(), + vec![1].into(), + 0, + )); + // Candidate 2 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + vec![1].into(), + vec![2].into(), + 0, + )); + // Candidate 3 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + vec![2].into(), + vec![3].into(), + 0, + )); + // Candidate 4 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + vec![3].into(), + vec![4].into(), + 0, + )); + // Candidate 5 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + vec![4].into(), + vec![5].into(), + 0, + )); -// let base_constraints = make_constraints(0, vec![0], required_parent.clone()); -// let mut storage = CandidateStorage::default(); + let base_constraints = make_constraints(0, vec![0], required_parent.clone()); + let mut storage = CandidateStorage::default(); -// let relay_parent_info = RelayChainBlockInfo { -// number: relay_parent_number, -// hash: relay_parent, -// storage_root: relay_parent_storage_root, -// }; + let relay_parent_info = RelayChainBlockInfo { + number: relay_parent_number, + hash: relay_parent, + storage_root: relay_parent_storage_root, + }; -// for (pvd, candidate) in candidates.iter() { -// storage -// .add_candidate(candidate.clone(), pvd.clone(), CandidateState::Seconded) -// .unwrap(); -// } -// let candidates = candidates.into_iter().map(|(_pvd, candidate)| candidate).collect::>(); -// let scope = Scope::with_ancestors( -// relay_parent_info.clone(), -// base_constraints.clone(), -// vec![], -// max_depth, -// vec![], -// ) -// .unwrap(); -// let chain = FragmentChain::populate(scope, &storage); + for (pvd, candidate) in candidates.iter() { + storage + .add_candidate_entry( + 
CandidateEntry::new_seconded(candidate.hash(), candidate.clone(), pvd.clone()) + .unwrap(), + ) + .unwrap(); + } + + let candidates = candidates + .into_iter() + .map(|(_pvd, candidate)| candidate.hash()) + .collect::>(); + let hashes = + |range: Range| range.map(|i| (candidates[i], relay_parent)).collect::>(); -// assert_eq!(candidates.len(), 6); -// assert_eq!(chain.to_vec().len(), 6); + let scope = Scope::with_ancestors( + relay_parent_info.clone(), + base_constraints.clone(), + vec![], + max_depth, + vec![], + ) + .unwrap(); + let mut chain = FragmentChain::populate(scope, storage.clone()); -// // No ancestors supplied. -// assert_eq!(chain.find_ancestor_path(Ancestors::new()), 0); -// assert_eq!(chain.find_backable_chain(Ancestors::new(), 0, |_| true), vec![]); -// assert_eq!( -// chain.find_backable_chain(Ancestors::new(), 1, |_| true), -// [0].into_iter().map(|i| candidates[i].hash()).collect::>() -// ); -// assert_eq!( -// chain.find_backable_chain(Ancestors::new(), 2, |_| true), -// [0, 1].into_iter().map(|i| candidates[i].hash()).collect::>() -// ); -// assert_eq!( -// chain.find_backable_chain(Ancestors::new(), 5, |_| true), -// [0, 1, 2, 3, 4].into_iter().map(|i| candidates[i].hash()).collect::>() -// ); + // For now, candidates are only seconded, not backed. So the best chain is empty and no + // candidate will be returned. 
+ assert_eq!(candidates.len(), 6); + assert_eq!(chain.best_chain_len(), 0); + assert_eq!(chain.unconnected_len(), 6); -// for count in 6..10 { -// assert_eq!( -// chain.find_backable_chain(Ancestors::new(), count, |_| true), -// [0, 1, 2, 3, 4, 5].into_iter().map(|i| candidates[i].hash()).collect::>() -// ); -// } + for count in 0..10 { + assert_eq!(chain.find_backable_chain(Ancestors::new(), count).len(), 0); + } -// assert_eq!( -// chain.find_backable_chain(Ancestors::new(), 7, |_| true), -// [0, 1, 2, 3, 4, 5].into_iter().map(|i| candidates[i].hash()).collect::>() -// ); -// assert_eq!( -// chain.find_backable_chain(Ancestors::new(), 10, |_| true), -// [0, 1, 2, 3, 4, 5].into_iter().map(|i| candidates[i].hash()).collect::>() -// ); + // Do tests with only a couple of candidates being backed. + { + let chain = chain.candidate_backed(&&candidates[5]).unwrap(); + for count in 0..10 { + assert_eq!(chain.find_backable_chain(Ancestors::new(), count).len(), 0); + } + let chain = chain.candidate_backed(&&candidates[3]).unwrap(); + let chain = chain.candidate_backed(&&candidates[4]).unwrap(); + for count in 0..10 { + assert_eq!(chain.find_backable_chain(Ancestors::new(), count).len(), 0); + } -// // Ancestor which is not part of the chain. Will be ignored. 
-// let ancestors: Ancestors = [CandidateHash::default()].into_iter().collect(); -// assert_eq!(chain.find_ancestor_path(ancestors.clone()), 0); -// assert_eq!( -// chain.find_backable_chain(ancestors, 4, |_| true), -// [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() -// ); -// let ancestors: Ancestors = -// [candidates[1].hash(), CandidateHash::default()].into_iter().collect(); -// assert_eq!(chain.find_ancestor_path(ancestors.clone()), 0); -// assert_eq!( -// chain.find_backable_chain(ancestors, 4, |_| true), -// [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() -// ); -// let ancestors: Ancestors = -// [candidates[0].hash(), CandidateHash::default()].into_iter().collect(); -// assert_eq!(chain.find_ancestor_path(ancestors.clone()), 1); -// assert_eq!( -// chain.find_backable_chain(ancestors, 4, |_| true), -// [1, 2, 3, 4].into_iter().map(|i| candidates[i].hash()).collect::>() -// ); + let chain = chain.candidate_backed(&&candidates[1]).unwrap(); + for count in 0..10 { + assert_eq!(chain.find_backable_chain(Ancestors::new(), count).len(), 0); + } -// // Ancestors which are part of the chain but don't form a path from root. Will be ignored. -// let ancestors: Ancestors = [candidates[1].hash(), candidates[2].hash()].into_iter().collect(); -// assert_eq!(chain.find_ancestor_path(ancestors.clone()), 0); -// assert_eq!( -// chain.find_backable_chain(ancestors, 4, |_| true), -// [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() -// ); + let chain = chain.candidate_backed(&&candidates[0]).unwrap(); + assert_eq!(chain.find_backable_chain(Ancestors::new(), 1), hashes(0..1)); + for count in 2..10 { + assert_eq!(chain.find_backable_chain(Ancestors::new(), count), hashes(0..2)); + } -// // Valid ancestors. 
-// let ancestors: Ancestors = [candidates[2].hash(), candidates[0].hash(), candidates[1].hash()] -// .into_iter() -// .collect(); -// assert_eq!(chain.find_ancestor_path(ancestors.clone()), 3); -// assert_eq!( -// chain.find_backable_chain(ancestors.clone(), 2, |_| true), -// [3, 4].into_iter().map(|i| candidates[i].hash()).collect::>() -// ); -// for count in 3..10 { -// assert_eq!( -// chain.find_backable_chain(ancestors.clone(), count, |_| true), -// [3, 4, 5].into_iter().map(|i| candidates[i].hash()).collect::>() -// ); -// } + // Now back the missing piece. + let chain = chain.candidate_backed(&&candidates[2]).unwrap(); + assert_eq!(chain.best_chain_len(), 6); + for count in 0..10 { + assert_eq!( + chain.find_backable_chain(Ancestors::new(), count), + (0..6) + .take(count as usize) + .map(|i| (candidates[i], relay_parent)) + .collect::>() + ); + } + } -// // Valid ancestors with candidates which have been omitted due to timeouts -// let ancestors: Ancestors = [candidates[0].hash(), candidates[2].hash()].into_iter().collect(); -// assert_eq!(chain.find_ancestor_path(ancestors.clone()), 1); -// assert_eq!( -// chain.find_backable_chain(ancestors.clone(), 3, |_| true), -// [1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() -// ); -// assert_eq!( -// chain.find_backable_chain(ancestors.clone(), 4, |_| true), -// [1, 2, 3, 4].into_iter().map(|i| candidates[i].hash()).collect::>() -// ); -// for count in 5..10 { -// assert_eq!( -// chain.find_backable_chain(ancestors.clone(), count, |_| true), -// [1, 2, 3, 4, 5].into_iter().map(|i| candidates[i].hash()).collect::>() -// ); -// } + // Now back all candidates. Back them in a random order. The result should always be the same. 
+ let mut candidates_shuffled = candidates.clone(); + candidates_shuffled.shuffle(&mut thread_rng()); + for candidate in candidates.iter() { + chain = chain.candidate_backed(candidate).unwrap(); + assert!(storage.mark_backed(candidate)); + } -// let ancestors: Ancestors = [candidates[0].hash(), candidates[1].hash(), candidates[3].hash()] -// .into_iter() -// .collect(); -// assert_eq!(chain.find_ancestor_path(ancestors.clone()), 2); -// assert_eq!( -// chain.find_backable_chain(ancestors.clone(), 4, |_| true), -// [2, 3, 4, 5].into_iter().map(|i| candidates[i].hash()).collect::>() -// ); + // No ancestors supplied. + assert_eq!(chain.find_ancestor_path(Ancestors::new()), 0); + assert_eq!(chain.find_backable_chain(Ancestors::new(), 0), vec![]); + assert_eq!(chain.find_backable_chain(Ancestors::new(), 1), hashes(0..1)); + assert_eq!(chain.find_backable_chain(Ancestors::new(), 2), hashes(0..2)); + assert_eq!(chain.find_backable_chain(Ancestors::new(), 5), hashes(0..5)); -// // Requested count is 0. -// assert_eq!(chain.find_backable_chain(ancestors, 0, |_| true), vec![]); - -// // Stop when we've found a candidate for which pred returns false. -// let ancestors: Ancestors = [candidates[2].hash(), candidates[0].hash(), candidates[1].hash()] -// .into_iter() -// .collect(); -// for count in 1..10 { -// assert_eq!( -// // Stop at 4. 
-// chain.find_backable_chain(ancestors.clone(), count, |hash| hash != -// &candidates[4].hash()), -// [3].into_iter().map(|i| candidates[i].hash()).collect::>() -// ); -// } + for count in 6..10 { + assert_eq!(chain.find_backable_chain(Ancestors::new(), count), hashes(0..6)); + } -// // Stop when we've found a candidate which is pending availability -// { -// let scope = Scope::with_ancestors( -// relay_parent_info.clone(), -// base_constraints, -// // Mark the third candidate as pending availability -// vec![PendingAvailability { -// candidate_hash: candidates[3].hash(), -// relay_parent: relay_parent_info, -// }], -// max_depth, -// vec![], -// ) -// .unwrap(); -// let chain = FragmentChain::populate(scope, &storage); -// let ancestors: Ancestors = -// [candidates[0].hash(), candidates[1].hash()].into_iter().collect(); -// assert_eq!( -// // Stop at 4. -// chain.find_backable_chain(ancestors.clone(), 3, |_| true), -// [2].into_iter().map(|i| candidates[i].hash()).collect::>() -// ); -// } -// } + assert_eq!(chain.find_backable_chain(Ancestors::new(), 7), hashes(0..6)); + assert_eq!(chain.find_backable_chain(Ancestors::new(), 10), hashes(0..6)); + + // Ancestor which is not part of the chain. Will be ignored. + let ancestors: Ancestors = [CandidateHash::default()].into_iter().collect(); + assert_eq!(chain.find_ancestor_path(ancestors.clone()), 0); + assert_eq!(chain.find_backable_chain(ancestors, 4), hashes(0..4)); + + let ancestors: Ancestors = [candidates[1], CandidateHash::default()].into_iter().collect(); + assert_eq!(chain.find_ancestor_path(ancestors.clone()), 0); + assert_eq!(chain.find_backable_chain(ancestors, 4), hashes(0..4)); + + let ancestors: Ancestors = [candidates[0], CandidateHash::default()].into_iter().collect(); + assert_eq!(chain.find_ancestor_path(ancestors.clone()), 1); + assert_eq!(chain.find_backable_chain(ancestors, 4), hashes(1..5)); + + // Ancestors which are part of the chain but don't form a path from root. Will be ignored. 
+ let ancestors: Ancestors = [candidates[1], candidates[2]].into_iter().collect(); + assert_eq!(chain.find_ancestor_path(ancestors.clone()), 0); + assert_eq!(chain.find_backable_chain(ancestors, 4), hashes(0..4)); + + // Valid ancestors. + let ancestors: Ancestors = [candidates[2], candidates[0], candidates[1]].into_iter().collect(); + assert_eq!(chain.find_ancestor_path(ancestors.clone()), 3); + assert_eq!(chain.find_backable_chain(ancestors.clone(), 2), hashes(3..5)); + for count in 3..10 { + assert_eq!(chain.find_backable_chain(ancestors.clone(), count), hashes(3..6)); + } + + // Valid ancestors with candidates which have been omitted due to timeouts + let ancestors: Ancestors = [candidates[0], candidates[2]].into_iter().collect(); + assert_eq!(chain.find_ancestor_path(ancestors.clone()), 1); + assert_eq!(chain.find_backable_chain(ancestors.clone(), 3), hashes(1..4)); + assert_eq!(chain.find_backable_chain(ancestors.clone(), 4), hashes(1..5)); + for count in 5..10 { + assert_eq!(chain.find_backable_chain(ancestors.clone(), count), hashes(1..6)); + } + + let ancestors: Ancestors = [candidates[0], candidates[1], candidates[3]].into_iter().collect(); + assert_eq!(chain.find_ancestor_path(ancestors.clone()), 2); + assert_eq!(chain.find_backable_chain(ancestors.clone(), 4), hashes(2..6)); + + // Requested count is 0. + assert_eq!(chain.find_backable_chain(ancestors, 0), vec![]); + + // Stop when we've found a candidate which is pending availability + { + let scope = Scope::with_ancestors( + relay_parent_info.clone(), + base_constraints, + // Mark the third candidate as pending availability + vec![PendingAvailability { + candidate_hash: candidates[3], + relay_parent: relay_parent_info, + }], + max_depth, + vec![], + ) + .unwrap(); + let chain = FragmentChain::populate(scope, storage); + let ancestors: Ancestors = [candidates[0], candidates[1]].into_iter().collect(); + assert_eq!( + // Stop at 4. 
+ chain.find_backable_chain(ancestors.clone(), 3), + hashes(2..3) + ); + } +} // #[test] // fn hypothetical_membership() { From 45b4133670cad565823600c86844bc427e5cdfe8 Mon Sep 17 00:00:00 2001 From: alindima Date: Thu, 25 Jul 2024 13:59:13 +0300 Subject: [PATCH 26/56] move wild import --- polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs b/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs index 3df0836ccf04..37750cdfeb2f 100644 --- a/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs +++ b/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs @@ -11,7 +11,6 @@ // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. -use polkadot_node_subsystem::messages::HypotheticalCandidate; /// # Overview /// /// A set of utilities for node-side code to emulate the logic the runtime uses for checking @@ -81,6 +80,7 @@ use polkadot_node_subsystem::messages::HypotheticalCandidate; /// /// That means a few blocks of execution time lost, which is not a big deal for code upgrades /// in practice at most once every few weeks. 
+use polkadot_node_subsystem::messages::HypotheticalCandidate; use polkadot_primitives::{ async_backing::Constraints as PrimitiveConstraints, BlockNumber, CandidateCommitments, CandidateHash, CollatorId, CollatorSignature, Hash, HeadData, Id as ParaId, From 6b10a1189849b9dbdba427258882f24909c6d1a8 Mon Sep 17 00:00:00 2001 From: alindima Date: Thu, 25 Jul 2024 14:50:49 +0300 Subject: [PATCH 27/56] add rand dependency --- polkadot/node/core/prospective-parachains/Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/polkadot/node/core/prospective-parachains/Cargo.toml b/polkadot/node/core/prospective-parachains/Cargo.toml index 29d805510923..8489d77d82e7 100644 --- a/polkadot/node/core/prospective-parachains/Cargo.toml +++ b/polkadot/node/core/prospective-parachains/Cargo.toml @@ -31,4 +31,5 @@ sp-core = { workspace = true, default-features = true } sp-application-crypto = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } +rand = { workspace = true } rstest = { workspace = true } From a81f149e8dcaafbd0f5c72710e16991ed553f3ce Mon Sep 17 00:00:00 2001 From: alindima Date: Fri, 26 Jul 2024 11:23:53 +0300 Subject: [PATCH 28/56] small refactor --- .../src/fragment_chain/mod.rs | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs index ab746fd8015f..c95ce31aea55 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs @@ -404,7 +404,7 @@ impl CandidateEntry { } } -impl HypotheticalOrConcreteCandidate for &CandidateEntry { +impl HypotheticalOrConcreteCandidate for CandidateEntry { fn commitments(&self) -> Option<&CandidateCommitments> { Some(&self.candidate.commitments) } @@ -866,15 +866,13 
@@ impl FragmentChain { return Err(Error::IntroduceBackedCandidate); } - let res = self.can_add_candidate_as_potential(&candidate); + self.can_add_candidate_as_potential(candidate)?; - if res.is_ok() { - // This clone is cheap, as it uses an Arc for the expensive stuff. - // We can't consume the candidate because other fragment chains may use it also. - self.unconnected.add_candidate_entry(candidate.clone())?; - } + // This clone is cheap, as it uses an Arc for the expensive stuff. + // We can't consume the candidate because other fragment chains may use it also. + self.unconnected.add_candidate_entry(candidate.clone())?; - res + Ok(()) } // Populate the unconnected potential candidate storage starting from a previous storage. @@ -886,7 +884,7 @@ impl FragmentChain { continue } - match self.can_add_candidate_as_potential(&&candidate) { + match self.can_add_candidate_as_potential(&candidate) { Ok(()) => { let _ = self.unconnected.add_candidate_entry(candidate); }, @@ -1054,7 +1052,7 @@ impl FragmentChain { // Only keep a candidate if its full ancestry was already kept as potential and this // candidate itself has potential. 
- if parent_has_potential && self.check_potential(&child).is_ok() { + if parent_has_potential && self.check_potential(child).is_ok() { queue.push_back((child.output_head_data_hash, true)); } else { // Otherwise, remove this candidate and continue looping for its children, but From dedfd685fcdbf9ce997c8d32010072ce2a2e2e2c Mon Sep 17 00:00:00 2001 From: alindima Date: Fri, 26 Jul 2024 12:07:05 +0300 Subject: [PATCH 29/56] more unit testing --- .../src/fragment_chain/tests.rs | 679 +++++++----------- 1 file changed, 244 insertions(+), 435 deletions(-) diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs index 31757c6aaaa6..b4a2cdaeb1e0 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs @@ -402,7 +402,7 @@ fn populate_empty() { } #[test] -fn test_populate() { +fn test_populate_and_check_potential() { let mut storage = CandidateStorage::default(); let para_id = ParaId::from(5u32); @@ -423,7 +423,6 @@ fn test_populate() { ]; let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); - let pending_availability = Vec::new(); // Candidates A -> B -> C. 
They are all backed let (pvd_a, candidate_a) = make_committed_candidate( @@ -435,12 +434,10 @@ fn test_populate() { relay_parent_x_info.number, ); let candidate_a_hash = candidate_a.hash(); - storage - .add_candidate_entry( - CandidateEntry::new(candidate_a_hash, candidate_a, pvd_a, CandidateState::Backed) - .unwrap(), - ) - .unwrap(); + let candidate_a_entry = + CandidateEntry::new(candidate_a_hash, candidate_a, pvd_a.clone(), CandidateState::Backed) + .unwrap(); + storage.add_candidate_entry(candidate_a_entry.clone()).unwrap(); let (pvd_b, candidate_b) = make_committed_candidate( para_id, relay_parent_y_info.hash, @@ -450,12 +447,9 @@ fn test_populate() { relay_parent_y_info.number, ); let candidate_b_hash = candidate_b.hash(); - storage - .add_candidate_entry( - CandidateEntry::new(candidate_b_hash, candidate_b, pvd_b, CandidateState::Backed) - .unwrap(), - ) - .unwrap(); + let candidate_b_entry = + CandidateEntry::new(candidate_b_hash, candidate_b, pvd_b, CandidateState::Backed).unwrap(); + storage.add_candidate_entry(candidate_b_entry.clone()).unwrap(); let (pvd_c, candidate_c) = make_committed_candidate( para_id, relay_parent_z_info.hash, @@ -465,12 +459,9 @@ fn test_populate() { relay_parent_z_info.number, ); let candidate_c_hash = candidate_c.hash(); - storage - .add_candidate_entry( - CandidateEntry::new(candidate_c_hash, candidate_c, pvd_c, CandidateState::Backed) - .unwrap(), - ) - .unwrap(); + let candidate_c_entry = + CandidateEntry::new(candidate_c_hash, candidate_c, pvd_c, CandidateState::Backed).unwrap(); + storage.add_candidate_entry(candidate_c_entry.clone()).unwrap(); // Candidate A doesn't adhere to the base constraints. 
{ @@ -487,7 +478,7 @@ fn test_populate() { let scope = Scope::with_ancestors( relay_parent_z_info.clone(), wrong_constraints.clone(), - pending_availability.clone(), + vec![], 4, ancestors.clone(), ) @@ -500,8 +491,16 @@ fn test_populate() { // Otherwise, if only the required parent doesn't match, candidate A is still a // potential candidate. if wrong_constraints.min_relay_parent_number == relay_parent_y_info.number { - // If A is not a potential candidate, neither are his descendants. + // If A is not a potential candidate, its descendants will also not be added. assert_eq!(chain.unconnected_len(), 0); + assert_matches!( + chain.can_add_candidate_as_potential(&candidate_a_entry), + Err(Error::RelayParentNotInScope(_, _)) + ); + // However, if taken independently, both B and C still have potential, since we + // don't know that A doesn't. + assert!(chain.can_add_candidate_as_potential(&candidate_b_entry).is_ok()); + assert!(chain.can_add_candidate_as_potential(&candidate_c_entry).is_ok()); } else { assert_eq!( chain.unconnected().map(|c| c.candidate_hash).collect::>(), @@ -511,119 +510,168 @@ fn test_populate() { } } - // Depth is 0, only allows one candidate, but the others will be kept as potential. - let scope = Scope::with_ancestors( - relay_parent_z_info.clone(), - base_constraints.clone(), - pending_availability.clone(), - 0, - ancestors.clone(), - ) - .unwrap(); - let chain = FragmentChain::populate(scope, storage.clone()); - assert_eq!(chain.best_chain_vec(), vec![candidate_a_hash]); - assert_eq!( - chain.unconnected().map(|c| c.candidate_hash).collect::>(), - [candidate_b_hash, candidate_c_hash].into_iter().collect() - ); + // Various depths + { + // Depth is 0, only allows one candidate, but the others will be kept as potential. 
+ let scope = Scope::with_ancestors( + relay_parent_z_info.clone(), + base_constraints.clone(), + vec![], + 0, + ancestors.clone(), + ) + .unwrap(); + let chain = FragmentChain::populate(scope.clone(), CandidateStorage::default()); + assert!(chain.can_add_candidate_as_potential(&candidate_a_entry).is_ok()); + assert!(chain.can_add_candidate_as_potential(&candidate_b_entry).is_ok()); + assert!(chain.can_add_candidate_as_potential(&candidate_c_entry).is_ok()); - // depth is 1, allows two candidates - let scope = Scope::with_ancestors( - relay_parent_z_info.clone(), - base_constraints.clone(), - pending_availability.clone(), - 1, - ancestors.clone(), - ) - .unwrap(); - let chain = FragmentChain::populate(scope, storage.clone()); - assert_eq!(chain.best_chain_vec(), vec![candidate_a_hash, candidate_b_hash]); - assert_eq!( - chain.unconnected().map(|c| c.candidate_hash).collect::>(), - [candidate_c_hash].into_iter().collect() - ); + let chain = FragmentChain::populate(scope, storage.clone()); + assert_eq!(chain.best_chain_vec(), vec![candidate_a_hash]); + assert_eq!( + chain.unconnected().map(|c| c.candidate_hash).collect::>(), + [candidate_b_hash, candidate_c_hash].into_iter().collect() + ); - // depth is larger than 2, allows all three candidates - for depth in 2..6 { + // depth is 1, allows two candidates let scope = Scope::with_ancestors( relay_parent_z_info.clone(), base_constraints.clone(), - pending_availability.clone(), - depth, + vec![], + 1, ancestors.clone(), ) .unwrap(); + let chain = FragmentChain::populate(scope.clone(), CandidateStorage::default()); + assert!(chain.can_add_candidate_as_potential(&candidate_a_entry).is_ok()); + assert!(chain.can_add_candidate_as_potential(&candidate_b_entry).is_ok()); + assert!(chain.can_add_candidate_as_potential(&candidate_c_entry).is_ok()); + let chain = FragmentChain::populate(scope, storage.clone()); + assert_eq!(chain.best_chain_vec(), vec![candidate_a_hash, candidate_b_hash]); assert_eq!( - chain.best_chain_vec(), - 
vec![candidate_a_hash, candidate_b_hash, candidate_c_hash] + chain.unconnected().map(|c| c.candidate_hash).collect::>(), + [candidate_c_hash].into_iter().collect() ); - assert_eq!(chain.unconnected_len(), 0); + + // depth is larger than 2, allows all three candidates + for depth in 2..6 { + let scope = Scope::with_ancestors( + relay_parent_z_info.clone(), + base_constraints.clone(), + vec![], + depth, + ancestors.clone(), + ) + .unwrap(); + let chain = FragmentChain::populate(scope.clone(), CandidateStorage::default()); + assert!(chain.can_add_candidate_as_potential(&candidate_a_entry).is_ok()); + assert!(chain.can_add_candidate_as_potential(&candidate_b_entry).is_ok()); + assert!(chain.can_add_candidate_as_potential(&candidate_c_entry).is_ok()); + + let chain = FragmentChain::populate(scope, storage.clone()); + assert_eq!( + chain.best_chain_vec(), + vec![candidate_a_hash, candidate_b_hash, candidate_c_hash] + ); + assert_eq!(chain.unconnected_len(), 0); + } } - // Candidate A has relay parent out of scope. Candidates B and C will also be deleted since - // they form a chain with A. - let ancestors_without_x = vec![relay_parent_y_info.clone()]; - let scope = Scope::with_ancestors( - relay_parent_z_info.clone(), - base_constraints.clone(), - pending_availability.clone(), - 4, - ancestors_without_x, - ) - .unwrap(); - let chain = FragmentChain::populate(scope, storage.clone()); - assert!(chain.best_chain_vec().is_empty()); - assert_eq!(chain.unconnected_len(), 0); + // Relay parents out of scope + { + // Candidate A has relay parent out of scope. Candidates B and C will also be deleted since + // they form a chain with A. 
+ let ancestors_without_x = vec![relay_parent_y_info.clone()]; + let scope = Scope::with_ancestors( + relay_parent_z_info.clone(), + base_constraints.clone(), + vec![], + 4, + ancestors_without_x, + ) + .unwrap(); + let chain = FragmentChain::populate(scope, storage.clone()); + assert!(chain.best_chain_vec().is_empty()); + assert_eq!(chain.unconnected_len(), 0); - // Candidates A and B have relay parents out of scope. Candidate C will also be deleted since - // it forms a chain with A and B. - let scope = Scope::with_ancestors( - relay_parent_z_info.clone(), - base_constraints.clone(), - pending_availability.clone(), - 4, - vec![], - ) - .unwrap(); - let chain = FragmentChain::populate(scope, storage.clone()); - assert!(chain.best_chain_vec().is_empty()); - assert_eq!(chain.unconnected_len(), 0); + assert_matches!( + chain.can_add_candidate_as_potential(&candidate_a_entry), + Err(Error::RelayParentNotInScope(_, _)) + ); + // However, if taken independently, both B and C still have potential, since we + // don't know that A doesn't. + assert!(chain.can_add_candidate_as_potential(&candidate_b_entry).is_ok()); + assert!(chain.can_add_candidate_as_potential(&candidate_c_entry).is_ok()); + + // Candidates A and B have relay parents out of scope. Candidate C will also be deleted + // since it forms a chain with A and B. 
+ let scope = Scope::with_ancestors( + relay_parent_z_info.clone(), + base_constraints.clone(), + vec![], + 4, + vec![], + ) + .unwrap(); + let chain = FragmentChain::populate(scope, storage.clone()); + assert!(chain.best_chain_vec().is_empty()); + assert_eq!(chain.unconnected_len(), 0); + + assert_matches!( + chain.can_add_candidate_as_potential(&candidate_a_entry), + Err(Error::RelayParentNotInScope(_, _)) + ); + assert_matches!( + chain.can_add_candidate_as_potential(&candidate_b_entry), + Err(Error::RelayParentNotInScope(_, _)) + ); + // However, if taken independently, C still has potential, since we + // don't know that A and B don't + assert!(chain.can_add_candidate_as_potential(&candidate_c_entry).is_ok()); + } // Parachain cycle is not allowed. Make C have the same parent as A. - let mut modified_storage = storage.clone(); - modified_storage.remove_candidate(&candidate_c_hash); - let (wrong_pvd_c, wrong_candidate_c) = make_committed_candidate( - para_id, - relay_parent_z_info.hash, - relay_parent_z_info.number, - vec![0x0c].into(), - vec![0x0a].into(), - relay_parent_z_info.number, - ); - modified_storage - .add_candidate_entry( - CandidateEntry::new( - wrong_candidate_c.hash(), - wrong_candidate_c, - wrong_pvd_c, - CandidateState::Backed, - ) - .unwrap(), + { + let mut modified_storage = storage.clone(); + modified_storage.remove_candidate(&candidate_c_hash); + let (wrong_pvd_c, wrong_candidate_c) = make_committed_candidate( + para_id, + relay_parent_z_info.hash, + relay_parent_z_info.number, + vec![0x0c].into(), + vec![0x0a].into(), + relay_parent_z_info.number, + ); + let wrong_candidate_c_entry = CandidateEntry::new( + wrong_candidate_c.hash(), + wrong_candidate_c, + wrong_pvd_c, + CandidateState::Backed, + ) + .unwrap(); + modified_storage.add_candidate_entry(wrong_candidate_c_entry.clone()).unwrap(); + let scope = Scope::with_ancestors( + relay_parent_z_info.clone(), + base_constraints.clone(), + vec![], + 4, + ancestors.clone(), ) .unwrap(); - let 
scope = Scope::with_ancestors( - relay_parent_z_info.clone(), - base_constraints.clone(), - pending_availability.clone(), - 4, - ancestors.clone(), - ) - .unwrap(); - let chain = FragmentChain::populate(scope, modified_storage); - assert_eq!(chain.best_chain_vec(), vec![candidate_a_hash, candidate_b_hash]); - assert_eq!(chain.unconnected_len(), 0); + let chain = FragmentChain::populate(scope.clone(), modified_storage); + assert_eq!(chain.best_chain_vec(), vec![candidate_a_hash, candidate_b_hash]); + assert_eq!(chain.unconnected_len(), 0); + + assert_matches!( + chain.can_add_candidate_as_potential(&wrong_candidate_c_entry), + Err(Error::Cycle) + ); + // However, if taken independently, C still has potential, since we don't know A and B. + let chain = FragmentChain::populate(scope.clone(), CandidateStorage::default()); + assert!(chain.can_add_candidate_as_potential(&wrong_candidate_c_entry).is_ok()); + } // Candidate C has the same relay parent as candidate A's parent. Relay parent not allowed // to move backwards @@ -637,21 +685,18 @@ fn test_populate() { vec![0x0d].into(), 0, ); - modified_storage - .add_candidate_entry( - CandidateEntry::new( - wrong_candidate_c.hash(), - wrong_candidate_c, - wrong_pvd_c, - CandidateState::Backed, - ) - .unwrap(), - ) - .unwrap(); + let wrong_candidate_c_entry = CandidateEntry::new( + wrong_candidate_c.hash(), + wrong_candidate_c, + wrong_pvd_c, + CandidateState::Backed, + ) + .unwrap(); + modified_storage.add_candidate_entry(wrong_candidate_c_entry.clone()).unwrap(); let scope = Scope::with_ancestors( relay_parent_z_info.clone(), base_constraints.clone(), - pending_availability.clone(), + vec![], 4, ancestors.clone(), ) @@ -660,9 +705,14 @@ fn test_populate() { let chain = FragmentChain::populate(scope, modified_storage); assert_eq!(chain.best_chain_vec(), vec![candidate_a_hash, candidate_b_hash]); assert_eq!(chain.unconnected_len(), 0); + assert_matches!( + chain.can_add_candidate_as_potential(&wrong_candidate_c_entry), + 
Err(Error::RelayParentMovedBackwards) + ); // Candidate C is an unconnected candidate. - // C's relay parent is allowed to move backwards from B's relay parent. + // C's relay parent is allowed to move backwards from B's relay parent, because C may later on + // trigger a reorg and B may get removed. let mut modified_storage = storage.clone(); modified_storage.remove_candidate(&candidate_c_hash); let (unconnected_pvd_c, unconnected_candidate_c) = make_committed_candidate( @@ -674,25 +724,26 @@ fn test_populate() { 0, ); let unconnected_candidate_c_hash = unconnected_candidate_c.hash(); + let unconnected_candidate_c_entry = CandidateEntry::new( + unconnected_candidate_c_hash, + unconnected_candidate_c, + unconnected_pvd_c, + CandidateState::Backed, + ) + .unwrap(); modified_storage - .add_candidate_entry( - CandidateEntry::new( - unconnected_candidate_c_hash, - unconnected_candidate_c, - unconnected_pvd_c, - CandidateState::Backed, - ) - .unwrap(), - ) + .add_candidate_entry(unconnected_candidate_c_entry.clone()) .unwrap(); let scope = Scope::with_ancestors( relay_parent_z_info.clone(), base_constraints.clone(), - pending_availability.clone(), + vec![], 4, ancestors.clone(), ) .unwrap(); + let chain = FragmentChain::populate(scope.clone(), CandidateStorage::default()); + assert!(chain.can_add_candidate_as_potential(&unconnected_candidate_c_entry).is_ok()); let chain = FragmentChain::populate(scope, modified_storage.clone()); assert_eq!(chain.best_chain_vec(), vec![candidate_a_hash, candidate_b_hash]); @@ -702,7 +753,9 @@ fn test_populate() { ); // Candidate A is a pending availability candidate and Candidate C is an unconnected candidate, - // C's relay parent is not allowed to move backwards from A's relay parent. 
+ // C's relay parent is not allowed to move backwards from A's relay parent because we're sure A + // will not get removed in the future, as it's already on-chain (unless it times out + // availability, a case for which we don't care to optimise for) modified_storage.remove_candidate(&candidate_a_hash); let (modified_pvd_a, modified_candidate_a) = make_committed_candidate( @@ -741,6 +794,10 @@ fn test_populate() { let chain = FragmentChain::populate(scope, modified_storage.clone()); assert_eq!(chain.best_chain_vec(), vec![modified_candidate_a_hash, candidate_b_hash]); assert_eq!(chain.unconnected_len(), 0); + assert_matches!( + chain.can_add_candidate_as_potential(&unconnected_candidate_c_entry), + Err(Error::RelayParentPrecedesCandidatePendingAvailability(_, _)) + ); // Not allowed to fork from a candidate pending availability let (wrong_pvd_c, wrong_candidate_c) = make_committed_candidate( @@ -752,18 +809,17 @@ fn test_populate() { 0, ); let wrong_candidate_c_hash = wrong_candidate_c.hash(); - modified_storage - .add_candidate_entry( - CandidateEntry::new( - wrong_candidate_c_hash, - wrong_candidate_c, - wrong_pvd_c, - CandidateState::Backed, - ) - .unwrap(), - ) - .unwrap(); + let wrong_candidate_c_entry = CandidateEntry::new( + wrong_candidate_c_hash, + wrong_candidate_c, + wrong_pvd_c, + CandidateState::Backed, + ) + .unwrap(); + modified_storage.add_candidate_entry(wrong_candidate_c_entry.clone()).unwrap(); + // Does not even matter if the fork selection rule would have picked up the new candidate, as + // the other is already pending availability. 
assert_eq!( fork_selection_rule(&wrong_candidate_c_hash, &modified_candidate_a_hash), Ordering::Less @@ -784,6 +840,10 @@ fn test_populate() { let chain = FragmentChain::populate(scope, modified_storage.clone()); assert_eq!(chain.best_chain_vec(), vec![modified_candidate_a_hash, candidate_b_hash]); assert_eq!(chain.unconnected_len(), 0); + assert_matches!( + chain.can_add_candidate_as_potential(&wrong_candidate_c_entry), + Err(Error::ForkWithCandidatePendingAvailability(_)) + ); // Test with candidates pending availability { @@ -884,6 +944,7 @@ fn test_populate() { .unwrap(); let chain = FragmentChain::populate(scope, storage.clone()); assert!(chain.best_chain_vec().is_empty()); + assert_eq!(chain.unconnected_len(), 0); } // More complex case: @@ -903,7 +964,7 @@ fn test_populate() { let scope = Scope::with_ancestors( relay_parent_z_info.clone(), base_constraints.clone(), - pending_availability.clone(), + vec![], 2, ancestors.clone(), ) @@ -1064,6 +1125,17 @@ fn test_populate() { .collect() ); + // Simulate a best chain reorg by backing a2. + { + let chain = chain.candidate_backed(&candidate_a2_hash).unwrap(); + assert_eq!(chain.best_chain_vec(), vec![candidate_a2_hash, candidate_b2_hash]); + // F is kept as it was truly unconnected. The rest will be trimmed. + assert_eq!( + chain.unconnected().map(|c| c.candidate_hash).collect::>(), + [candidate_f_hash].into_iter().collect() + ); + } + // Candidate F has an invalid hrmp watermark. however, it was not checked beforehand as we don't // have its parent yet. Add its parent now. This will not impact anything as E is not yet part // of the best chain. 
@@ -1143,7 +1215,7 @@ fn test_populate() { let scope = Scope::with_ancestors( relay_parent_z_info.clone(), base_constraints.clone(), - pending_availability.clone(), + vec![], 2, ancestors.clone(), ) @@ -1209,60 +1281,20 @@ fn test_find_ancestor_path_and_find_backable_chain() { vec![0].into(), 0, )); - // Candidate 1 - candidates.push(make_committed_candidate( - para_id, - relay_parent, - 0, - vec![0].into(), - vec![1].into(), - 0, - )); - // Candidate 2 - candidates.push(make_committed_candidate( - para_id, - relay_parent, - 0, - vec![1].into(), - vec![2].into(), - 0, - )); - // Candidate 3 - candidates.push(make_committed_candidate( - para_id, - relay_parent, - 0, - vec![2].into(), - vec![3].into(), - 0, - )); - // Candidate 4 - candidates.push(make_committed_candidate( - para_id, - relay_parent, - 0, - vec![3].into(), - vec![4].into(), - 0, - )); - // Candidate 5 - candidates.push(make_committed_candidate( - para_id, - relay_parent, - 0, - vec![4].into(), - vec![5].into(), - 0, - )); - let base_constraints = make_constraints(0, vec![0], required_parent.clone()); - let mut storage = CandidateStorage::default(); + // Candidates 1..=5 + for index in 1..=5 { + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + vec![index - 1].into(), + vec![index].into(), + 0, + )); + } - let relay_parent_info = RelayChainBlockInfo { - number: relay_parent_number, - hash: relay_parent, - storage_root: relay_parent_storage_root, - }; + let mut storage = CandidateStorage::default(); for (pvd, candidate) in candidates.iter() { storage @@ -1280,6 +1312,13 @@ fn test_find_ancestor_path_and_find_backable_chain() { let hashes = |range: Range| range.map(|i| (candidates[i], relay_parent)).collect::>(); + let relay_parent_info = RelayChainBlockInfo { + number: relay_parent_number, + hash: relay_parent, + storage_root: relay_parent_storage_root, + }; + + let base_constraints = make_constraints(0, vec![0], required_parent.clone()); let scope = Scope::with_ancestors( 
relay_parent_info.clone(), base_constraints.clone(), @@ -1425,236 +1464,6 @@ fn test_find_ancestor_path_and_find_backable_chain() { } } -// #[test] -// fn hypothetical_membership() { -// let mut storage = CandidateStorage::default(); - -// let para_id = ParaId::from(5u32); -// let relay_parent_a = Hash::repeat_byte(1); - -// let (pvd_a, candidate_a) = make_committed_candidate( -// para_id, -// relay_parent_a, -// 0, -// vec![0x0a].into(), -// vec![0x0b].into(), -// 0, -// ); -// let candidate_a_hash = candidate_a.hash(); - -// let (pvd_b, candidate_b) = make_committed_candidate( -// para_id, -// relay_parent_a, -// 0, -// vec![0x0b].into(), -// vec![0x0c].into(), -// 0, -// ); -// let candidate_b_hash = candidate_b.hash(); - -// let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); - -// let relay_parent_a_info = RelayChainBlockInfo { -// number: pvd_a.relay_parent_number, -// hash: relay_parent_a, -// storage_root: pvd_a.relay_parent_storage_root, -// }; - -// let max_depth = 4; -// storage.add_candidate(candidate_a, pvd_a, CandidateState::Seconded).unwrap(); -// storage.add_candidate(candidate_b, pvd_b, CandidateState::Seconded).unwrap(); -// let scope = Scope::with_ancestors( -// relay_parent_a_info.clone(), -// base_constraints.clone(), -// vec![], -// max_depth, -// vec![], -// ) -// .unwrap(); -// let chain = FragmentChain::populate(scope, &storage); - -// assert_eq!(chain.to_vec().len(), 2); - -// // Check candidates which are already present -// assert!(chain.hypothetical_membership( -// HypotheticalCandidate::Incomplete { -// parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), -// candidate_relay_parent: relay_parent_a, -// candidate_para: para_id, -// candidate_hash: candidate_a_hash, -// }, -// &storage, -// )); -// assert!(chain.hypothetical_membership( -// HypotheticalCandidate::Incomplete { -// parent_head_data_hash: HeadData::from(vec![0x0b]).hash(), -// candidate_relay_parent: relay_parent_a, -// candidate_para: para_id, -// 
candidate_hash: candidate_b_hash, -// }, -// &storage, -// )); - -// // Forks not allowed. -// assert!(!chain.hypothetical_membership( -// HypotheticalCandidate::Incomplete { -// parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), -// candidate_relay_parent: relay_parent_a, -// candidate_para: para_id, -// candidate_hash: CandidateHash(Hash::repeat_byte(21)), -// }, -// &storage, -// )); -// assert!(!chain.hypothetical_membership( -// HypotheticalCandidate::Incomplete { -// parent_head_data_hash: HeadData::from(vec![0x0b]).hash(), -// candidate_relay_parent: relay_parent_a, -// candidate_para: para_id, -// candidate_hash: CandidateHash(Hash::repeat_byte(22)), -// }, -// &storage, -// )); - -// // Unknown candidate which builds on top of the current chain. -// assert!(chain.hypothetical_membership( -// HypotheticalCandidate::Incomplete { -// parent_head_data_hash: HeadData::from(vec![0x0c]).hash(), -// candidate_relay_parent: relay_parent_a, -// candidate_para: para_id, -// candidate_hash: CandidateHash(Hash::repeat_byte(23)), -// }, -// &storage, -// )); - -// // Unknown unconnected candidate which may be valid. -// assert!(chain.hypothetical_membership( -// HypotheticalCandidate::Incomplete { -// parent_head_data_hash: HeadData::from(vec![0x0e]).hash(), -// candidate_relay_parent: relay_parent_a, -// candidate_para: para_id, -// candidate_hash: CandidateHash(Hash::repeat_byte(23)), -// }, -// &storage, -// )); - -// // The number of unconnected candidates is limited (chain.len() + unconnected) <= max_depth -// { -// // C will be an unconnected candidate. -// let (pvd_c, candidate_c) = make_committed_candidate( -// para_id, -// relay_parent_a, -// 0, -// vec![0x0e].into(), -// vec![0x0f].into(), -// 0, -// ); -// let candidate_c_hash = candidate_c.hash(); - -// // Add an invalid candidate in the storage. This would introduce a fork. Just to test that -// // it's ignored. 
-// let (invalid_pvd, invalid_candidate) = make_committed_candidate( -// para_id, -// relay_parent_a, -// 1, -// vec![0x0a].into(), -// vec![0x0b].into(), -// 0, -// ); - -// let scope = Scope::with_ancestors(relay_parent_a_info, base_constraints, vec![], 2, vec![]) -// .unwrap(); -// let mut storage = storage.clone(); -// storage.add_candidate(candidate_c, pvd_c, CandidateState::Seconded).unwrap(); - -// let chain = FragmentChain::populate(scope, &storage); -// assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); - -// storage -// .add_candidate(invalid_candidate, invalid_pvd, CandidateState::Seconded) -// .unwrap(); - -// // Check that C is accepted as a potential unconnected candidate. -// assert!(!chain.hypothetical_membership( -// HypotheticalCandidate::Incomplete { -// parent_head_data_hash: HeadData::from(vec![0x0e]).hash(), -// candidate_relay_parent: relay_parent_a, -// candidate_hash: candidate_c_hash, -// candidate_para: para_id -// }, -// &storage, -// )); - -// // Since C is already an unconnected candidate in the storage. 
-// assert!(!chain.hypothetical_membership( -// HypotheticalCandidate::Incomplete { -// parent_head_data_hash: HeadData::from(vec![0x0f]).hash(), -// candidate_relay_parent: relay_parent_a, -// candidate_para: para_id, -// candidate_hash: CandidateHash(Hash::repeat_byte(23)), -// }, -// &storage, -// )); -// } -// } - -// #[test] -// fn hypothetical_membership_stricter_on_complete_candidates() { -// let storage = CandidateStorage::default(); - -// let para_id = ParaId::from(5u32); -// let relay_parent_a = Hash::repeat_byte(1); - -// let (pvd_a, candidate_a) = make_committed_candidate( -// para_id, -// relay_parent_a, -// 0, -// vec![0x0a].into(), -// vec![0x0b].into(), -// 1000, // watermark is illegal -// ); - -// let candidate_a_hash = candidate_a.hash(); - -// let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); -// let pending_availability = Vec::new(); - -// let relay_parent_a_info = RelayChainBlockInfo { -// number: pvd_a.relay_parent_number, -// hash: relay_parent_a, -// storage_root: pvd_a.relay_parent_storage_root, -// }; - -// let max_depth = 4; -// let scope = Scope::with_ancestors( -// relay_parent_a_info, -// base_constraints, -// pending_availability, -// max_depth, -// vec![], -// ) -// .unwrap(); -// let chain = FragmentChain::populate(scope, &storage); - -// assert!(chain.hypothetical_membership( -// HypotheticalCandidate::Incomplete { -// parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), -// candidate_relay_parent: relay_parent_a, -// candidate_para: para_id, -// candidate_hash: candidate_a_hash, -// }, -// &storage, -// )); - -// assert!(!chain.hypothetical_membership( -// HypotheticalCandidate::Complete { -// receipt: Arc::new(candidate_a), -// persisted_validation_data: pvd_a, -// candidate_hash: candidate_a_hash, -// }, -// &storage, -// )); -// } - // #[test] // fn hypothetical_membership_with_pending_availability_in_scope() { // let mut storage = CandidateStorage::default(); From 
d758d14b60b1de1b24b6872687be159fa4996688 Mon Sep 17 00:00:00 2001 From: alindima Date: Mon, 29 Jul 2024 16:54:26 +0300 Subject: [PATCH 30/56] unit testing --- .../src/fragment_chain/tests.rs | 251 +++++++----------- .../core/prospective-parachains/src/lib.rs | 19 +- .../core/prospective-parachains/src/tests.rs | 202 +++++--------- 3 files changed, 168 insertions(+), 304 deletions(-) diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs index b4a2cdaeb1e0..7dda9ee28775 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs @@ -980,12 +980,12 @@ fn test_populate_and_check_potential() { relay_parent_z_info.number, ); let candidate_d_hash = candidate_d.hash(); - storage - .add_candidate_entry( - CandidateEntry::new(candidate_d_hash, candidate_d, pvd_d, CandidateState::Backed) - .unwrap(), - ) - .unwrap(); + let candidate_d_entry = + CandidateEntry::new(candidate_d_hash, candidate_d, pvd_d, CandidateState::Backed).unwrap(); + assert!(FragmentChain::populate(scope.clone(), storage.clone()) + .can_add_candidate_as_potential(&candidate_d_entry) + .is_ok()); + storage.add_candidate_entry(candidate_d_entry).unwrap(); // Candidate F let (pvd_f, candidate_f) = make_committed_candidate( @@ -997,12 +997,13 @@ fn test_populate_and_check_potential() { 1000, ); let candidate_f_hash = candidate_f.hash(); - storage - .add_candidate_entry( - CandidateEntry::new(candidate_f_hash, candidate_f, pvd_f, CandidateState::Seconded) - .unwrap(), - ) - .unwrap(); + let candidate_f_entry = + CandidateEntry::new(candidate_f_hash, candidate_f, pvd_f, CandidateState::Seconded) + .unwrap(); + assert!(FragmentChain::populate(scope.clone(), storage.clone()) + .can_add_candidate_as_potential(&candidate_f_entry) + .is_ok()); + storage.add_candidate_entry(candidate_f_entry.clone()).unwrap(); // 
Candidate A1 let (pvd_a1, candidate_a1) = make_committed_candidate( @@ -1014,15 +1015,19 @@ fn test_populate_and_check_potential() { relay_parent_x_info.number, ); let candidate_a1_hash = candidate_a1.hash(); + let candidate_a1_entry = + CandidateEntry::new(candidate_a1_hash, candidate_a1, pvd_a1, CandidateState::Backed) + .unwrap(); // Candidate A1 is created so that its hash is larger than the candidate A hash. assert_eq!(fork_selection_rule(&candidate_a_hash, &candidate_a1_hash), Ordering::Less); - storage - .add_candidate_entry( - CandidateEntry::new(candidate_a1_hash, candidate_a1, pvd_a1, CandidateState::Backed) - .unwrap(), - ) - .unwrap(); + assert_matches!( + FragmentChain::populate(scope.clone(), storage.clone()) + .can_add_candidate_as_potential(&candidate_a1_entry), + Err(Error::ForkChoiceRule(other)) if candidate_a_hash == other + ); + + storage.add_candidate_entry(candidate_a1_entry.clone()).unwrap(); // Candidate B1. let (pvd_b1, candidate_b1) = make_committed_candidate( @@ -1034,13 +1039,14 @@ fn test_populate_and_check_potential() { relay_parent_x_info.number, ); let candidate_b1_hash = candidate_b1.hash(); + let candidate_b1_entry = + CandidateEntry::new(candidate_b1_hash, candidate_b1, pvd_b1, CandidateState::Seconded) + .unwrap(); + assert!(FragmentChain::populate(scope.clone(), storage.clone()) + .can_add_candidate_as_potential(&candidate_b1_entry) + .is_ok()); - storage - .add_candidate_entry( - CandidateEntry::new(candidate_b1_hash, candidate_b1, pvd_b1, CandidateState::Seconded) - .unwrap(), - ) - .unwrap(); + storage.add_candidate_entry(candidate_b1_entry).unwrap(); // Candidate C1. 
let (pvd_c1, candidate_c1) = make_committed_candidate( @@ -1052,13 +1058,14 @@ fn test_populate_and_check_potential() { relay_parent_x_info.number, ); let candidate_c1_hash = candidate_c1.hash(); + let candidate_c1_entry = + CandidateEntry::new(candidate_c1_hash, candidate_c1, pvd_c1, CandidateState::Backed) + .unwrap(); + assert!(FragmentChain::populate(scope.clone(), storage.clone()) + .can_add_candidate_as_potential(&candidate_c1_entry) + .is_ok()); - storage - .add_candidate_entry( - CandidateEntry::new(candidate_c1_hash, candidate_c1, pvd_c1, CandidateState::Backed) - .unwrap(), - ) - .unwrap(); + storage.add_candidate_entry(candidate_c1_entry).unwrap(); // Candidate C2. let (pvd_c2, candidate_c2) = make_committed_candidate( @@ -1070,13 +1077,13 @@ fn test_populate_and_check_potential() { relay_parent_x_info.number, ); let candidate_c2_hash = candidate_c2.hash(); - - storage - .add_candidate_entry( - CandidateEntry::new(candidate_c2_hash, candidate_c2, pvd_c2, CandidateState::Seconded) - .unwrap(), - ) - .unwrap(); + let candidate_c2_entry = + CandidateEntry::new(candidate_c2_hash, candidate_c2, pvd_c2, CandidateState::Seconded) + .unwrap(); + assert!(FragmentChain::populate(scope.clone(), storage.clone()) + .can_add_candidate_as_potential(&candidate_c2_entry) + .is_ok()); + storage.add_candidate_entry(candidate_c2_entry).unwrap(); // Candidate A2. let (pvd_a2, candidate_a2) = make_committed_candidate( @@ -1088,15 +1095,17 @@ fn test_populate_and_check_potential() { relay_parent_x_info.number, ); let candidate_a2_hash = candidate_a2.hash(); + let candidate_a2_entry = + CandidateEntry::new(candidate_a2_hash, candidate_a2, pvd_a2, CandidateState::Seconded) + .unwrap(); // Candidate A2 is created so that its hash is larger than the candidate A hash. 
assert_eq!(fork_selection_rule(&candidate_a2_hash, &candidate_a_hash), Ordering::Less); - storage - .add_candidate_entry( - CandidateEntry::new(candidate_a2_hash, candidate_a2, pvd_a2, CandidateState::Seconded) - .unwrap(), - ) - .unwrap(); + assert!(FragmentChain::populate(scope.clone(), storage.clone()) + .can_add_candidate_as_potential(&candidate_a2_entry) + .is_ok()); + + storage.add_candidate_entry(candidate_a2_entry).unwrap(); // Candidate B2. let (pvd_b2, candidate_b2) = make_committed_candidate( @@ -1108,13 +1117,13 @@ fn test_populate_and_check_potential() { relay_parent_y_info.number, ); let candidate_b2_hash = candidate_b2.hash(); - - storage - .add_candidate_entry( - CandidateEntry::new(candidate_b2_hash, candidate_b2, pvd_b2, CandidateState::Backed) - .unwrap(), - ) - .unwrap(); + let candidate_b2_entry = + CandidateEntry::new(candidate_b2_hash, candidate_b2, pvd_b2, CandidateState::Backed) + .unwrap(); + assert!(FragmentChain::populate(scope.clone(), storage.clone()) + .can_add_candidate_as_potential(&candidate_b2_entry) + .is_ok()); + storage.add_candidate_entry(candidate_b2_entry).unwrap(); let chain = FragmentChain::populate(scope.clone(), storage.clone()); assert_eq!(chain.best_chain_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); @@ -1124,6 +1133,16 @@ fn test_populate_and_check_potential() { .into_iter() .collect() ); + // Cannot add as potential an already present candidate (whether it's in the best chain or in + // unconnected storage) + assert_matches!( + chain.can_add_candidate_as_potential(&candidate_a_entry), + Err(Error::CandidateAlreadyKnown) + ); + assert_matches!( + chain.can_add_candidate_as_potential(&candidate_f_entry), + Err(Error::CandidateAlreadyKnown) + ); // Simulate a best chain reorg by backing a2. 
{ @@ -1134,6 +1153,16 @@ fn test_populate_and_check_potential() { chain.unconnected().map(|c| c.candidate_hash).collect::>(), [candidate_f_hash].into_iter().collect() ); + + // A and A1 will never have potential again. + assert_matches!( + chain.can_add_candidate_as_potential(&candidate_a1_entry), + Err(Error::ForkChoiceRule(_)) + ); + assert_matches!( + chain.can_add_candidate_as_potential(&candidate_a_entry), + Err(Error::ForkChoiceRule(_)) + ); } // Candidate F has an invalid hrmp watermark. however, it was not checked beforehand as we don't @@ -1201,6 +1230,11 @@ fn test_populate_and_check_potential() { chain.unconnected().map(|c| c.candidate_hash).collect::>(), [candidate_d_hash, candidate_f_hash, candidate_e_hash].into_iter().collect() ); + // Cannot add as potential an already pending availability candidate + assert_matches!( + chain.can_add_candidate_as_potential(&candidate_a_entry), + Err(Error::CandidateAlreadyPendingAvailability) + ); // Simulate the fact that candidates A, B and C have been included. 
@@ -1232,6 +1266,11 @@ fn test_populate_and_check_potential() { let chain = chain.candidate_backed(&candidate_e_hash).unwrap(); assert_eq!(chain.best_chain_vec(), vec![candidate_d_hash, candidate_e_hash]); assert_eq!(chain.unconnected_len(), 0); + + assert_matches!( + chain.can_add_candidate_as_potential(&candidate_f_entry), + Err(Error::CheckAgainstConstraints(_)) + ); } #[test] @@ -1463,115 +1502,3 @@ fn test_find_ancestor_path_and_find_backable_chain() { ); } } - -// #[test] -// fn hypothetical_membership_with_pending_availability_in_scope() { -// let mut storage = CandidateStorage::default(); - -// let para_id = ParaId::from(5u32); -// let relay_parent_a = Hash::repeat_byte(1); -// let relay_parent_b = Hash::repeat_byte(2); -// let relay_parent_c = Hash::repeat_byte(3); - -// let (pvd_a, candidate_a) = make_committed_candidate( -// para_id, -// relay_parent_a, -// 0, -// vec![0x0a].into(), -// vec![0x0b].into(), -// 0, -// ); -// let candidate_a_hash = candidate_a.hash(); - -// let (pvd_b, candidate_b) = make_committed_candidate( -// para_id, -// relay_parent_b, -// 1, -// vec![0x0b].into(), -// vec![0x0c].into(), -// 1, -// ); - -// // Note that relay parent `a` is not allowed. 
-// let base_constraints = make_constraints(1, vec![], vec![0x0a].into()); - -// let relay_parent_a_info = RelayChainBlockInfo { -// number: pvd_a.relay_parent_number, -// hash: relay_parent_a, -// storage_root: pvd_a.relay_parent_storage_root, -// }; -// let pending_availability = vec![PendingAvailability { -// candidate_hash: candidate_a_hash, -// relay_parent: relay_parent_a_info, -// }]; - -// let relay_parent_b_info = RelayChainBlockInfo { -// number: pvd_b.relay_parent_number, -// hash: relay_parent_b, -// storage_root: pvd_b.relay_parent_storage_root, -// }; -// let relay_parent_c_info = RelayChainBlockInfo { -// number: pvd_b.relay_parent_number + 1, -// hash: relay_parent_c, -// storage_root: Hash::zero(), -// }; - -// let max_depth = 4; -// storage.add_candidate(candidate_a, pvd_a, CandidateState::Seconded).unwrap(); -// storage.add_candidate(candidate_b, pvd_b, CandidateState::Backed).unwrap(); -// storage.mark_backed(&candidate_a_hash); - -// let scope = Scope::with_ancestors( -// relay_parent_c_info, -// base_constraints, -// pending_availability, -// max_depth, -// vec![relay_parent_b_info], -// ) -// .unwrap(); -// let chain = FragmentChain::populate(scope, &storage); - -// assert_eq!(chain.to_vec().len(), 2); - -// let candidate_d_hash = CandidateHash(Hash::repeat_byte(0xAA)); - -// assert!(chain.hypothetical_membership( -// HypotheticalCandidate::Incomplete { -// parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), -// candidate_relay_parent: relay_parent_a, -// candidate_hash: candidate_a_hash, -// candidate_para: para_id -// }, -// &storage, -// )); - -// assert!(!chain.hypothetical_membership( -// HypotheticalCandidate::Incomplete { -// parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), -// candidate_relay_parent: relay_parent_c, -// candidate_para: para_id, -// candidate_hash: candidate_d_hash, -// }, -// &storage, -// )); - -// assert!(!chain.hypothetical_membership( -// HypotheticalCandidate::Incomplete { -// 
parent_head_data_hash: HeadData::from(vec![0x0b]).hash(), -// candidate_relay_parent: relay_parent_c, -// candidate_para: para_id, -// candidate_hash: candidate_d_hash, -// }, -// &storage, -// )); - -// assert!(chain.hypothetical_membership( -// HypotheticalCandidate::Incomplete { -// parent_head_data_hash: HeadData::from(vec![0x0c]).hash(), -// candidate_relay_parent: relay_parent_b, -// candidate_para: para_id, -// candidate_hash: candidate_d_hash, -// }, -// &storage, -// )); -// } diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index 02dff4a3e1a9..dd6a52c19627 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -180,6 +180,10 @@ async fn handle_active_leaves_update( let mut temp_header_cache = HashMap::new(); for activated in update.activated.into_iter() { + if update.deactivated.contains(&activated.hash) { + continue + } + let hash = activated.hash; let mode = prospective_parachains_mode(ctx.sender(), hash) @@ -217,23 +221,12 @@ async fn handle_active_leaves_update( Some(info) => info, }; - let requested_ancestry_len = if allowed_ancestry_len == 0 { - 1 - // We should try requesting at least one, so that we can know the previous leaf. - } else { - allowed_ancestry_len - }; - let mut ancestry = - fetch_ancestry(ctx, &mut temp_header_cache, hash, requested_ancestry_len).await?; + let ancestry = + fetch_ancestry(ctx, &mut temp_header_cache, hash, allowed_ancestry_len).await?; let prev_fragment_chains = ancestry.first().and_then(|prev_leaf| view.active_leaves.get(&prev_leaf.hash)); - if allowed_ancestry_len == 0 { - // Now, if the allowed ancestry len was 0, clear the one ancestor we requested. - ancestry.clear(); - } - let mut fragment_chains = HashMap::new(); for para in scheduled_paras { // Find constraints and pending availability candidates. 
diff --git a/polkadot/node/core/prospective-parachains/src/tests.rs b/polkadot/node/core/prospective-parachains/src/tests.rs index 85ae4926a491..c21368ed97f0 100644 --- a/polkadot/node/core/prospective-parachains/src/tests.rs +++ b/polkadot/node/core/prospective-parachains/src/tests.rs @@ -761,17 +761,27 @@ fn introduce_candidate_multiple_times() { 1.into(), Ancestors::default(), 5, - response_a, + response_a.clone(), ) .await; // Introduce the same candidate multiple times. It'll return true but it won't be added. - // We'll check below that the candidate count remains 1. for _ in 0..5 { introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a.clone()) .await; } + // Check candidate tree membership. + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + Ancestors::default(), + 5, + response_a, + ) + .await; + virtual_overseer }); @@ -779,7 +789,7 @@ fn introduce_candidate_multiple_times() { } #[test] -fn fragment_chain_length_is_bounded() { +fn fragment_chain_best_chain_length_is_bounded() { let test_state = TestState::default(); let view = test_harness(|mut virtual_overseer| async move { // Leaf A @@ -827,12 +837,11 @@ fn fragment_chain_length_is_bounded() { ); // Introduce candidates A and B. Since max depth is 1, only these two will be allowed. - introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a.clone()) - .await; - introduce_seconded_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b.clone()) - .await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b).await; - // Back candidates. Otherwise, we cannot check membership with GetBackableCandidates. + // Back candidates. Otherwise, we cannot check membership with GetBackableCandidates and + // they won't be part of the best chain. 
back_candidate(&mut virtual_overseer, &candidate_a, candidate_a.hash()).await; back_candidate(&mut virtual_overseer, &candidate_b, candidate_b.hash()).await; @@ -847,93 +856,21 @@ fn fragment_chain_length_is_bounded() { ) .await; - // Introducing C will fail. - introduce_seconded_candidate_failed(&mut virtual_overseer, candidate_c, pvd_c.clone()) - .await; - - virtual_overseer - }); - - assert_eq!(view.active_leaves.len(), 1); -} - -#[test] -fn unconnected_candidate_count_is_bounded() { - let test_state = TestState::default(); - let view = test_harness(|mut virtual_overseer| async move { - // Leaf A - let leaf_a = TestLeaf { - number: 100, - hash: Hash::from_low_u64_be(130), - para_data: vec![ - (1.into(), PerParaData::new(97, HeadData(vec![1, 2, 3]))), - (2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))), - ], - }; - // Activate leaves. - activate_leaf_with_params( - &mut virtual_overseer, - &leaf_a, - &test_state, - AsyncBackingParams { max_candidate_depth: 1, allowed_ancestry_len: 3 }, - ) - .await; - - // Candidates A, B and C are all potential candidates but don't form a chain. - let (candidate_a, pvd_a) = make_candidate( - leaf_a.hash, - leaf_a.number, - 1.into(), - HeadData(vec![1]), - HeadData(vec![2]), - test_state.validation_code_hash, - ); - let (candidate_b, pvd_b) = make_candidate( - leaf_a.hash, - leaf_a.number, - 1.into(), - HeadData(vec![3]), - HeadData(vec![4]), - test_state.validation_code_hash, - ); - let (candidate_c, pvd_c) = make_candidate( - leaf_a.hash, - leaf_a.number, - 1.into(), - HeadData(vec![4]), - HeadData(vec![5]), - test_state.validation_code_hash, - ); - - // Introduce candidates A and B. Although max depth is 1 (which should allow for two - // candidates), only 1 is allowed, because the last candidate must be a connected candidate. 
- introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a.clone()) - .await; - introduce_seconded_candidate_failed( - &mut virtual_overseer, - candidate_b.clone(), - pvd_b.clone(), - ) - .await; - - // Back candidates. Otherwise, we cannot check membership with GetBackableCandidates. - back_candidate(&mut virtual_overseer, &candidate_a, candidate_a.hash()).await; + // Introducing C will not fail. It will be kept as unconnected storage. + introduce_seconded_candidate(&mut virtual_overseer, candidate_c.clone(), pvd_c).await; + // When being backed, candidate C will be dropped. + back_candidate(&mut virtual_overseer, &candidate_c, candidate_c.hash()).await; - // Check candidate tree membership. Should be empty. get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), Ancestors::default(), 5, - vec![], + vec![(candidate_a.hash(), leaf_a.hash), (candidate_b.hash(), leaf_a.hash)], ) .await; - // Introducing C will also fail. - introduce_seconded_candidate_failed(&mut virtual_overseer, candidate_c, pvd_c.clone()) - .await; - virtual_overseer }); @@ -1856,11 +1793,13 @@ fn check_hypothetical_membership_query() { introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a.clone()) .await; - // Get membership of candidates after adding A. C is not a potential candidate because we - // may only add one more candidate, which must be a connected candidate. - for (candidate, pvd) in - [(candidate_a.clone(), pvd_a.clone()), (candidate_b.clone(), pvd_b.clone())] - { + // Get membership of candidates after adding A. They all are still unconnected candidates + // (not part of the best backable chain). 
+ for (candidate, pvd) in [ + (candidate_a.clone(), pvd_a.clone()), + (candidate_b.clone(), pvd_b.clone()), + (candidate_c.clone(), pvd_c.clone()), + ] { get_hypothetical_membership( &mut virtual_overseer, candidate.hash(), @@ -1871,14 +1810,24 @@ fn check_hypothetical_membership_query() { .await; } - get_hypothetical_membership( - &mut virtual_overseer, - candidate_c.hash(), - candidate_c.clone(), - pvd_c.clone(), - vec![], - ) - .await; + // Back A. Now A is part of the best chain the rest can be added as unconnected. + + back_candidate(&mut virtual_overseer, &candidate_a, candidate_a.hash()).await; + + for (candidate, pvd) in [ + (candidate_a.clone(), pvd_a.clone()), + (candidate_b.clone(), pvd_b.clone()), + (candidate_c.clone(), pvd_c.clone()), + ] { + get_hypothetical_membership( + &mut virtual_overseer, + candidate.hash(), + candidate, + pvd, + vec![leaf_a.hash, leaf_b.hash], + ) + .await; + } // Candidate D has invalid relay parent. let (candidate_d, pvd_d) = make_candidate( @@ -1891,14 +1840,17 @@ fn check_hypothetical_membership_query() { ); introduce_seconded_candidate_failed(&mut virtual_overseer, candidate_d, pvd_d).await; - // Add candidate B. + // Add candidate B and back it. introduce_seconded_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b.clone()) .await; + back_candidate(&mut virtual_overseer, &candidate_b, candidate_b.hash()).await; // Get membership of candidates after adding B. 
- for (candidate, pvd) in - [(candidate_a.clone(), pvd_a.clone()), (candidate_b.clone(), pvd_b.clone())] - { + for (candidate, pvd) in [ + (candidate_a.clone(), pvd_a.clone()), + (candidate_b.clone(), pvd_b.clone()), + (candidate_c.clone(), pvd_c.clone()), + ] { get_hypothetical_membership( &mut virtual_overseer, candidate.hash(), @@ -1909,18 +1861,6 @@ fn check_hypothetical_membership_query() { .await; } - get_hypothetical_membership( - &mut virtual_overseer, - candidate_c.hash(), - candidate_c.clone(), - pvd_c.clone(), - vec![], - ) - .await; - - // Add candidate C. It will fail because we have enough candidates for the configured depth. - introduce_seconded_candidate_failed(&mut virtual_overseer, candidate_c, pvd_c).await; - virtual_overseer }); @@ -1974,6 +1914,16 @@ fn check_pvd_query() { test_state.validation_code_hash, ); + // Candidate E. + let (candidate_e, pvd_e) = make_candidate( + leaf_a.hash, + leaf_a.number, + 1.into(), + HeadData(vec![5]), + HeadData(vec![6]), + test_state.validation_code_hash, + ); + // Get pvd of candidate A before adding it. get_pvd( &mut virtual_overseer, @@ -2036,14 +1986,15 @@ fn check_pvd_query() { introduce_seconded_candidate(&mut virtual_overseer, candidate_c, pvd_c.clone()).await; // Get pvd of candidate C after adding it. - get_pvd( - &mut virtual_overseer, - 1.into(), - leaf_a.hash, - HeadData(vec![2]), - Some(pvd_c.clone()), - ) - .await; + get_pvd(&mut virtual_overseer, 1.into(), leaf_a.hash, HeadData(vec![2]), Some(pvd_c)).await; + + // Get pvd of candidate E before adding it. It won't be found, as we don't have its parent. + get_pvd(&mut virtual_overseer, 1.into(), leaf_a.hash, HeadData(vec![5]), None).await; + + // Add candidate E and check again. Should succeed this time. 
+ introduce_seconded_candidate(&mut virtual_overseer, candidate_e, pvd_e.clone()).await; + + get_pvd(&mut virtual_overseer, 1.into(), leaf_a.hash, HeadData(vec![5]), Some(pvd_e)).await; virtual_overseer }); @@ -2139,13 +2090,6 @@ fn correctly_updates_leaves(#[case] runtime_api_version: u32) { virtual_overseer .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update))) .await; - handle_leaf_activation( - &mut virtual_overseer, - &leaf_a, - &test_state, - ASYNC_BACKING_PARAMETERS, - ) - .await; // Remove the leaf again. Send some unnecessary hashes. let update = ActiveLeavesUpdate { From 8858f360879919e526d43179d9bf6f451fae7793 Mon Sep 17 00:00:00 2001 From: alindima Date: Tue, 30 Jul 2024 15:48:02 +0300 Subject: [PATCH 31/56] remove unused deps --- Cargo.lock | 8 +------- polkadot/node/core/prospective-parachains/Cargo.toml | 8 +------- polkadot/node/core/prospective-parachains/src/lib.rs | 2 ++ 3 files changed, 4 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cfc9a5f609f1..3421ee21f546 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13423,23 +13423,17 @@ name = "polkadot-node-core-prospective-parachains" version = "6.0.0" dependencies = [ "assert_matches", - "bitvec", "fatality", "futures", - "parity-scale-codec", - "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", - "polkadot-node-subsystem-types", "polkadot-node-subsystem-util", "polkadot-primitives", "polkadot-primitives-test-helpers", "rand", "rstest", - "sp-application-crypto", "sp-core", - "sp-keyring", - "sp-keystore", + "sp-tracing 16.0.0", "thiserror", "tracing-gum", ] diff --git a/polkadot/node/core/prospective-parachains/Cargo.toml b/polkadot/node/core/prospective-parachains/Cargo.toml index 8489d77d82e7..705014e67a05 100644 --- a/polkadot/node/core/prospective-parachains/Cargo.toml +++ b/polkadot/node/core/prospective-parachains/Cargo.toml @@ -12,24 +12,18 @@ workspace = true [dependencies] futures = { workspace = true } gum = { 
workspace = true, default-features = true } -codec = { workspace = true, default-features = true } thiserror = { workspace = true } fatality = { workspace = true } -bitvec = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } -polkadot-node-primitives = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-node-subsystem-util = { workspace = true, default-features = true } [dev-dependencies] assert_matches = { workspace = true } polkadot-node-subsystem-test-helpers = { workspace = true } -polkadot-node-subsystem-types = { workspace = true, default-features = true } polkadot-primitives-test-helpers = { workspace = true } +sp-tracing = { workspace = true } sp-core = { workspace = true, default-features = true } -sp-application-crypto = { workspace = true, default-features = true } -sp-keyring = { workspace = true, default-features = true } -sp-keystore = { workspace = true, default-features = true } rand = { workspace = true } rstest = { workspace = true } diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index dd6a52c19627..9f40843a3ebc 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -26,6 +26,8 @@ //! //! This subsystem also handles concerns such as the relay-chain being forkful and session changes. 
+#![deny(unused_crate_dependencies)] + use std::collections::{HashMap, HashSet}; use fragment_chain::CandidateStorage; From b2612e950193e33b64c842d53ddb72be69b1aeef Mon Sep 17 00:00:00 2001 From: alindima Date: Tue, 30 Jul 2024 15:48:34 +0300 Subject: [PATCH 32/56] finish up tests --- .../core/prospective-parachains/src/tests.rs | 254 ++++++++++++++++-- 1 file changed, 237 insertions(+), 17 deletions(-) diff --git a/polkadot/node/core/prospective-parachains/src/tests.rs b/polkadot/node/core/prospective-parachains/src/tests.rs index c21368ed97f0..825cce92662c 100644 --- a/polkadot/node/core/prospective-parachains/src/tests.rs +++ b/polkadot/node/core/prospective-parachains/src/tests.rs @@ -111,6 +111,8 @@ fn get_parent_hash(hash: Hash) -> Hash { fn test_harness>( test: impl FnOnce(VirtualOverseer) -> T, ) -> View { + sp_tracing::init_for_tests(); + let pool = sp_core::testing::TaskExecutor::new(); let (mut context, virtual_overseer) = @@ -307,16 +309,20 @@ async fn handle_leaf_activation( ); } + let mut used_relay_parents = HashSet::new(); for (hash, number) in ancestry_iter { - send_block_header(virtual_overseer, hash, number).await; - assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::SessionIndexForChild(tx)) - ) if parent == hash => { - tx.send(Ok(1)).unwrap(); - } - ); + if !used_relay_parents.contains(&hash) { + send_block_header(virtual_overseer, hash, number).await; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::SessionIndexForChild(tx)) + ) if parent == hash => { + tx.send(Ok(1)).unwrap(); + } + ); + used_relay_parents.insert(hash); + } } let paras: HashSet<_> = test_state.claim_queue.values().flatten().collect(); @@ -353,12 +359,16 @@ async fn handle_leaf_activation( ); for pending in pending_availability { - send_block_header( - virtual_overseer, - pending.descriptor.relay_parent, - 
pending.relay_parent_number, - ) - .await; + if !used_relay_parents.contains(&pending.descriptor.relay_parent) { + send_block_header( + virtual_overseer, + pending.descriptor.relay_parent, + pending.relay_parent_number, + ) + .await; + + used_relay_parents.insert(pending.descriptor.relay_parent); + } } } @@ -765,7 +775,8 @@ fn introduce_candidate_multiple_times() { ) .await; - // Introduce the same candidate multiple times. It'll return true but it won't be added. + // Introduce the same candidate multiple times. It'll return true but it will only be added + // once. for _ in 0..5 { introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a.clone()) .await; @@ -1175,6 +1186,8 @@ fn introduce_candidate_on_multiple_forks() { #[test] fn unconnected_candidates_become_connected() { + // This doesn't test all the complicated cases with many unconnected candidates, as it's more + // extensively tested in the `fragment_chain::tests` module. let test_state = TestState::default(); let view = test_harness(|mut virtual_overseer| async move { // Leaf A @@ -1351,6 +1364,10 @@ fn check_backable_query_single_candidate() { back_candidate(&mut virtual_overseer, &candidate_a, candidate_hash_a).await; back_candidate(&mut virtual_overseer, &candidate_b, candidate_hash_b).await; + // Back an unknown candidate. It doesn't return anything but it's ignored. Will not have any + // effect on the backable candidates. + back_candidate(&mut virtual_overseer, &candidate_b, CandidateHash(Hash::random())).await; + // Should not get any backable candidates for the other para. 
get_backable_candidates( &mut virtual_overseer, @@ -2106,6 +2123,197 @@ fn correctly_updates_leaves(#[case] runtime_api_version: u32) { assert_eq!(view.active_leaves.len(), 0); } +#[test] +fn handle_active_leaves_update_gets_candidates_from_parent() { + let para_id = ParaId::from(1); + let mut test_state = TestState::default(); + test_state.claim_queue = test_state + .claim_queue + .into_iter() + .filter(|(_, paras)| matches!(paras.front(), Some(para) if para == ¶_id)) + .collect(); + assert_eq!(test_state.claim_queue.len(), 1); + let view = test_harness(|mut virtual_overseer| async move { + // Leaf A + let leaf_a = TestLeaf { + number: 100, + hash: Hash::from_low_u64_be(130), + para_data: vec![(para_id, PerParaData::new(97, HeadData(vec![1, 2, 3])))], + }; + // Activate leaf A. + activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await; + + // Candidates A, B, C and D all form a chain + let (candidate_a, pvd_a) = make_candidate( + leaf_a.hash, + leaf_a.number, + para_id, + HeadData(vec![1, 2, 3]), + HeadData(vec![1]), + test_state.validation_code_hash, + ); + let (candidate_b, pvd_b) = make_candidate( + leaf_a.hash, + leaf_a.number, + para_id, + HeadData(vec![1]), + HeadData(vec![2]), + test_state.validation_code_hash, + ); + let (candidate_c, pvd_c) = make_candidate( + leaf_a.hash, + leaf_a.number, + para_id, + HeadData(vec![2]), + HeadData(vec![3]), + test_state.validation_code_hash, + ); + let (candidate_d, pvd_d) = make_candidate( + leaf_a.hash, + leaf_a.number, + para_id, + HeadData(vec![3]), + HeadData(vec![4]), + test_state.validation_code_hash, + ); + + // Introduce candidates. 
+ introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a.clone()) + .await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b.clone()) + .await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_c.clone(), pvd_c.clone()) + .await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_d.clone(), pvd_d.clone()) + .await; + + // Back candidates. Otherwise, we cannot check membership with GetBackableCandidates. + back_candidate(&mut virtual_overseer, &candidate_a, candidate_a.hash()).await; + back_candidate(&mut virtual_overseer, &candidate_b, candidate_b.hash()).await; + back_candidate(&mut virtual_overseer, &candidate_c, candidate_c.hash()).await; + back_candidate(&mut virtual_overseer, &candidate_d, candidate_d.hash()).await; + + // Check candidate tree membership. + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + para_id, + Ancestors::default(), + 5, + vec![ + (candidate_a.hash(), leaf_a.hash), + (candidate_b.hash(), leaf_a.hash), + (candidate_c.hash(), leaf_a.hash), + (candidate_d.hash(), leaf_a.hash), + ], + ) + .await; + + // Activate leaf B, which makes candidates A and B pending availability. + // Leaf B + let leaf_b = TestLeaf { + number: 101, + hash: Hash::from_low_u64_be(129), + para_data: vec![( + para_id, + PerParaData::new_with_pending( + 98, + HeadData(vec![1, 2, 3]), + vec![ + CandidatePendingAvailability { + candidate_hash: candidate_a.hash(), + descriptor: candidate_a.descriptor.clone(), + commitments: candidate_a.commitments.clone(), + relay_parent_number: leaf_a.number, + max_pov_size: MAX_POV_SIZE, + }, + CandidatePendingAvailability { + candidate_hash: candidate_b.hash(), + descriptor: candidate_b.descriptor.clone(), + commitments: candidate_b.commitments.clone(), + relay_parent_number: leaf_a.number, + max_pov_size: MAX_POV_SIZE, + }, + ], + ), + )], + }; + // Activate leaf B. 
+ activate_leaf(&mut virtual_overseer, &leaf_b, &test_state).await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_b, + para_id, + Ancestors::default(), + 5, + vec![], + ) + .await; + + get_backable_candidates( + &mut virtual_overseer, + &leaf_b, + para_id, + [candidate_a.hash(), candidate_b.hash()].into_iter().collect(), + 5, + vec![(candidate_c.hash(), leaf_a.hash), (candidate_d.hash(), leaf_a.hash)], + ) + .await; + + get_backable_candidates( + &mut virtual_overseer, + &leaf_b, + para_id, + Ancestors::default(), + 5, + vec![], + ) + .await; + + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + para_id, + Ancestors::default(), + 5, + vec![ + (candidate_a.hash(), leaf_a.hash), + (candidate_b.hash(), leaf_a.hash), + (candidate_c.hash(), leaf_a.hash), + (candidate_d.hash(), leaf_a.hash), + ], + ) + .await; + + // Now deactivate leaf A. + deactivate_leaf(&mut virtual_overseer, leaf_a.hash).await; + + get_backable_candidates( + &mut virtual_overseer, + &leaf_b, + para_id, + Ancestors::default(), + 5, + vec![], + ) + .await; + + get_backable_candidates( + &mut virtual_overseer, + &leaf_b, + para_id, + [candidate_a.hash(), candidate_b.hash()].into_iter().collect(), + 5, + vec![(candidate_c.hash(), leaf_a.hash), (candidate_d.hash(), leaf_a.hash)], + ) + .await; + + virtual_overseer + }); + + assert_eq!(view.active_leaves.len(), 1); +} + #[test] fn persists_pending_availability_candidate() { let mut test_state = TestState::default(); @@ -2162,7 +2370,8 @@ fn persists_pending_availability_candidate() { ); let candidate_hash_b = candidate_b.hash(); - introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a.clone()) + .await; back_candidate(&mut virtual_overseer, &candidate_a, candidate_hash_a).await; let candidate_a_pending_av = CandidatePendingAvailability { @@ -2186,6 +2395,17 @@ fn persists_pending_availability_candidate() { }; 
activate_leaf(&mut virtual_overseer, &leaf_b, &test_state).await; + // Hypothetical membership for a candidate already pending availability will not return the + // leaves where it's pending availability. A is only pending availability on leaf B. + get_hypothetical_membership( + &mut virtual_overseer, + candidate_hash_a, + candidate_a, + pvd_a, + vec![leaf_a.hash], + ) + .await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b).await; back_candidate(&mut virtual_overseer, &candidate_b, candidate_hash_b).await; From 3443af133205301197ec1b0c7fa878031aff139f Mon Sep 17 00:00:00 2001 From: alindima Date: Wed, 31 Jul 2024 10:06:58 +0300 Subject: [PATCH 33/56] add prdoc --- prdoc/pr_4937.prdoc | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 prdoc/pr_4937.prdoc diff --git a/prdoc/pr_4937.prdoc b/prdoc/pr_4937.prdoc new file mode 100644 index 000000000000..916340e42545 --- /dev/null +++ b/prdoc/pr_4937.prdoc @@ -0,0 +1,23 @@ +title: "prospective-parachains rework: take II" + +doc: + - audience: Node Dev + description: | + Add back support for backing parachain forks. Once a candidate reaches the backing quorum, + validators use a shared way of picking the winning fork to back on-chain. This was done in + order to increase the likelihood that all backers will vote on the winning fork. + The functionality of backing unconnected candidates introduced by the previous rework is preserved. 
+ +crates: + - name: polkadot-node-core-prospective-parachains + bump: minor + - name: polkadot-node-subsystem-types + bump: minor + - name: polkadot-node-subsystem-util + bump: minor + - name: polkadot-node-core-provisioner + bump: none + - name: polkadot-collator-protocol + bump: none + - name: polkadot-statement-distribution + bump: none From 4b975cb5e014e6cfbcd1d42077dcb0eaabddbebe Mon Sep 17 00:00:00 2001 From: alindima Date: Wed, 31 Jul 2024 10:08:34 +0300 Subject: [PATCH 34/56] rollback CI yaml changes --- .gitlab/pipeline/check.yml | 2 +- .gitlab/pipeline/test.yml | 18 +++++++++--------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.gitlab/pipeline/check.yml b/.gitlab/pipeline/check.yml index 53bd8419dbc1..2b8b90ef19a4 100644 --- a/.gitlab/pipeline/check.yml +++ b/.gitlab/pipeline/check.yml @@ -3,7 +3,7 @@ cargo-clippy: extends: - .docker-env - .common-refs - # - .pipeline-stopper-artifacts + - .pipeline-stopper-artifacts variables: RUSTFLAGS: "-D warnings" script: diff --git a/.gitlab/pipeline/test.yml b/.gitlab/pipeline/test.yml index 518953e5e288..d171a8a19426 100644 --- a/.gitlab/pipeline/test.yml +++ b/.gitlab/pipeline/test.yml @@ -32,7 +32,7 @@ codecov-start: extends: - .kubernetes-env - .common-refs - # - .pipeline-stopper-artifacts + - .pipeline-stopper-artifacts - .run-immediately script: - !reference [.codecov-check, script] @@ -53,7 +53,7 @@ codecov-finish: extends: - .kubernetes-env - .common-refs - # - .pipeline-stopper-artifacts + - .pipeline-stopper-artifacts needs: - test-linux-stable-codecov script: @@ -72,7 +72,7 @@ test-linux-stable-codecov: extends: - .docker-env - .common-refs - # - .pipeline-stopper-artifacts + - .pipeline-stopper-artifacts variables: CI_IMAGE: europe-docker.pkg.dev/parity-build/ci-images/ci-unified:bullseye-1.77.0 RUST_TOOLCHAIN: stable @@ -118,7 +118,7 @@ test-linux-stable: - .docker-env - .common-refs - .run-immediately - # - .pipeline-stopper-artifacts + - .pipeline-stopper-artifacts variables: 
RUST_TOOLCHAIN: stable # Enable debug assertions since we are running optimized builds for testing @@ -167,7 +167,7 @@ test-linux-stable-runtime-benchmarks: - .docker-env - .common-refs - .run-immediately - # - .pipeline-stopper-artifacts + - .pipeline-stopper-artifacts variables: RUST_TOOLCHAIN: stable # Enable debug assertions since we are running optimized builds for testing @@ -303,7 +303,7 @@ cargo-check-benches: - .common-refs - .run-immediately - .collect-artifacts - # - .pipeline-stopper-artifacts + - .pipeline-stopper-artifacts before_script: # TODO: DON'T FORGET TO CHANGE FOR PROD VALUES!!! # merges in the master branch on PRs. skip if base is not master @@ -463,7 +463,7 @@ test-linux-stable-int: - .docker-env - .common-refs - .run-immediately - # - .pipeline-stopper-artifacts + - .pipeline-stopper-artifacts variables: # Enable debug assertions since we are running optimized builds for testing # but still want to have debug assertions. @@ -485,7 +485,7 @@ check-tracing: - .docker-env - .common-refs - .run-immediately - # - .pipeline-stopper-artifacts + - .pipeline-stopper-artifacts script: # with-tracing must be explicitly activated, we run a test to ensure this works as expected in both cases - time cargo test --locked --manifest-path ./substrate/primitives/tracing/Cargo.toml --no-default-features @@ -498,7 +498,7 @@ check-metadata-hash: - .docker-env - .common-refs - .run-immediately - # - .pipeline-stopper-artifacts + - .pipeline-stopper-artifacts script: - time cargo build --locked -p westend-runtime --features metadata-hash From e4dd3992f81b31876f5893ffbedb4f483f0c4c9d Mon Sep 17 00:00:00 2001 From: alindima Date: Thu, 1 Aug 2024 13:29:57 +0300 Subject: [PATCH 35/56] correct log --- .../node/core/prospective-parachains/src/fragment_chain/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs index 
c95ce31aea55..1bb0837969ce 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs @@ -170,7 +170,7 @@ pub(crate) enum Error { RelayParentMovedBackwards, #[error(transparent)] CandidateEntry(#[from] CandidateEntryError), - #[error("Relay parent {0:?} not in scope. Earliest relay parent allowed {0:?}")] + #[error("Relay parent {0:?} not in scope. Earliest relay parent allowed {1:?}")] RelayParentNotInScope(Hash, Hash), } From d43f02436ec98d4907cb987f9742ea7c4b061179 Mon Sep 17 00:00:00 2001 From: alindima Date: Thu, 1 Aug 2024 20:36:05 +0300 Subject: [PATCH 36/56] fix another log --- .../node/core/prospective-parachains/src/fragment_chain/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs index 1bb0837969ce..9a3cdf0b37c3 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs @@ -154,7 +154,7 @@ pub(crate) enum Error { MultiplePaths, #[error("Attempting to directly introduce a Backed candidate. 
It should first be introduced as Seconded")] IntroduceBackedCandidate, - #[error("Relay parent {0:?} of the candidate precedes the relay parent {0:?} of a pending availability candidate")] + #[error("Relay parent {0:?} of the candidate precedes the relay parent {1:?} of a pending availability candidate")] RelayParentPrecedesCandidatePendingAvailability(Hash, Hash), #[error("Candidate would introduce a fork with a pending availability candidate: {0:?}")] ForkWithCandidatePendingAvailability(CandidateHash), From 10e3425c7870cde40566268a4562562ab0e181f7 Mon Sep 17 00:00:00 2001 From: alindima Date: Thu, 1 Aug 2024 20:44:10 +0300 Subject: [PATCH 37/56] remove CandidateAlreadyPendingAvailability error variant --- .../src/fragment_chain/mod.rs | 10 +--------- .../src/fragment_chain/tests.rs | 2 +- .../node/core/prospective-parachains/src/lib.rs | 17 +---------------- .../core/prospective-parachains/src/tests.rs | 4 +--- 4 files changed, 4 insertions(+), 29 deletions(-) diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs index 9a3cdf0b37c3..d24fc99237b9 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs @@ -144,8 +144,6 @@ use thiserror::Error; pub(crate) enum Error { #[error("Candidate already known")] CandidateAlreadyKnown, - #[error("Candidate is already pending availability")] - CandidateAlreadyPendingAvailability, #[error("Candidate's parent head is equal to its output head. Would introduce a cycle.")] ZeroLengthCycle, #[error("Candidate would introduce a cycle")] @@ -836,19 +834,13 @@ impl FragmentChain { /// Checks if this candidate could be added in the future to this chain. /// This will return `Error::CandidateAlreadyKnown` if the candidate is already in the chain or - /// the unconnected candidate storage. 
It will return - /// `Error::CandidateAlreadyPendingAvailability` if the candidate is already pending - /// availability. + /// the unconnected candidate storage. pub fn can_add_candidate_as_potential( &self, candidate: &impl HypotheticalOrConcreteCandidate, ) -> Result<(), Error> { let candidate_hash = candidate.candidate_hash(); - if self.scope.get_pending_availability(&candidate_hash).is_some() { - return Err(Error::CandidateAlreadyPendingAvailability) - } - if self.best_chain.contains(&candidate_hash) || self.unconnected.contains(&candidate_hash) { return Err(Error::CandidateAlreadyKnown) } diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs index 7dda9ee28775..34cb9f2784a6 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs @@ -1233,7 +1233,7 @@ fn test_populate_and_check_potential() { // Cannot add as potential an already pending availability candidate assert_matches!( chain.can_add_candidate_as_potential(&candidate_a_entry), - Err(Error::CandidateAlreadyPendingAvailability) + Err(Error::CandidateAlreadyKnown) ); // Simulate the fact that candidates A, B and C have been included. 
diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index 9f40843a3ebc..27e9ce0ad8cd 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -265,9 +265,7 @@ async fn handle_active_leaves_update( ); match res { - Ok(_) | - Err(FragmentChainError::CandidateAlreadyKnown) | - Err(FragmentChainError::CandidateAlreadyPendingAvailability) => {}, + Ok(_) | Err(FragmentChainError::CandidateAlreadyKnown) => {}, Err(err) => { gum::warn!( target: LOG_TARGET, @@ -502,16 +500,6 @@ async fn handle_introduce_seconded_candidate( ); added = true; }, - Err(FragmentChainError::CandidateAlreadyPendingAvailability) => { - gum::debug!( - target: LOG_TARGET, - para = ?para, - relay_parent = ?leaf, - "Attempting to introduce a candidate which is already pending availability: {:?}", - candidate_hash - ); - added = true; - }, Err(err) => { gum::debug!( target: LOG_TARGET, @@ -719,9 +707,6 @@ fn answer_hypothetical_membership_request( Err(FragmentChainError::CandidateAlreadyKnown) | Ok(()) => { membership.push(*active_leaf); }, - // This will also match if the candidate is already pending availability. - // In this case, we don't need to validate it again or distribute its statements. - // It's already on chain. Err(err) => { gum::debug!( target: LOG_TARGET, diff --git a/polkadot/node/core/prospective-parachains/src/tests.rs b/polkadot/node/core/prospective-parachains/src/tests.rs index 825cce92662c..7935207bf6d7 100644 --- a/polkadot/node/core/prospective-parachains/src/tests.rs +++ b/polkadot/node/core/prospective-parachains/src/tests.rs @@ -2395,14 +2395,12 @@ fn persists_pending_availability_candidate() { }; activate_leaf(&mut virtual_overseer, &leaf_b, &test_state).await; - // Hypothetical membership for a candidate already pending availability will not return the - // leaves where it's pending availability. 
A is only pending availability on leaf B. get_hypothetical_membership( &mut virtual_overseer, candidate_hash_a, candidate_a, pvd_a, - vec![leaf_a.hash], + vec![leaf_a.hash, leaf_b.hash], ) .await; From e9da176e1260fe1b497c5a6397163f472806338b Mon Sep 17 00:00:00 2001 From: alindima Date: Fri, 2 Aug 2024 12:51:49 +0300 Subject: [PATCH 38/56] fix verbose log --- polkadot/node/network/statement-distribution/src/v2/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/polkadot/node/network/statement-distribution/src/v2/mod.rs b/polkadot/node/network/statement-distribution/src/v2/mod.rs index e497bae61ce6..109c29f520c5 100644 --- a/polkadot/node/network/statement-distribution/src/v2/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/mod.rs @@ -2238,8 +2238,8 @@ async fn fragment_chain_update_inner( gum::debug!( target: LOG_TARGET, active_leaf_hash = ?active_leaf_hash, - "Calling getHypotheticalMembership from statement distribution: {:?}", - &hypotheticals + "Calling getHypotheticalMembership from statement distribution for candidates: {:?}", + &hypotheticals.iter().map(|hypo| hypo.candidate_hash()).collect::>() ); let candidate_memberships = { let (tx, rx) = oneshot::channel(); From 0c5f6c3bd9578f498ccfd64577d0ec5642fac546 Mon Sep 17 00:00:00 2001 From: alindima Date: Fri, 2 Aug 2024 15:23:57 +0300 Subject: [PATCH 39/56] fix bug with relay chain forks not getting the candidates from the previous relay parent --- .../core/prospective-parachains/src/lib.rs | 94 ++++++++++++++++--- .../prospective-parachains/src/metrics.rs | 43 +++++++-- 2 files changed, 118 insertions(+), 19 deletions(-) diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index 27e9ce0ad8cd..3db9ddf860d7 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -74,16 +74,43 @@ const LOG_TARGET: &str = 
"parachain::prospective-parachains"; struct RelayBlockViewData { // The fragment chains for current and upcoming scheduled paras. fragment_chains: HashMap, + // The relay parent number of this leaf. + number: u32, } struct View { - // Active or recent relay-chain blocks by block hash. + // Active relay-chain blocks by block hash. active_leaves: HashMap, + // Inactive leaves that are still in scope. + implicit_leaves: HashMap, + // The minimum relay parent number allowed as ancestry across all active leaves. All implicit + // and active leaves have a higher relay parent number. + // `None` if it wasn't yet computed. This can be a bit wasteful, as there may be multiple relay + // parents at this height. We could keep track of the paths to each active leaf to optimize + // this. However, the complexity isn't justified. + min_relay_parent: Option, } impl View { + // Initialize with empty values. fn new() -> Self { - View { active_leaves: HashMap::new() } + View { + active_leaves: HashMap::new(), + implicit_leaves: HashMap::new(), + min_relay_parent: None, + } + } + + // Get the fragment chains of this leaf (active or inactive). + fn get_fragment_chains(&self, leaf: &Hash) -> Option<&HashMap> { + self.active_leaves + .get(&leaf) + .map(|view_data| &view_data.fragment_chains) + .or_else(|| { + self.implicit_leaves + .get(leaf) + .map(|inactive_leaf| &inactive_leaf.fragment_chains) + }) } } @@ -169,18 +196,20 @@ async fn handle_active_leaves_update( update: ActiveLeavesUpdate, metrics: &Metrics, ) -> JfyiErrorResult<()> { - // For each active leaf: + // For any active leaf: // - determine the scheduled paras // - pre-populate the candidate storage with pending availability candidates and candidates from - // the parent leaf. + // the parent leaf // - populate the fragment chain // - // Only then, clean up inactive leaves. They must be cleaned only after new leaves are - // processed, because we may reuse their candidates. 
+ // Clean up the leaves from the implicit view that have went out of scope. + // Process the newly-deactivated leaves. Remove them from the active leaves set and keep them as + // implicit leaves if their relay parent is still in scope. let _timer = metrics.time_handle_active_leaves_update(); let mut temp_header_cache = HashMap::new(); + // There can only be one newly activated leaf, `update.activated` is an `Option`. for activated in update.activated.into_iter() { if update.deactivated.contains(&activated.hash) { continue @@ -227,7 +256,7 @@ async fn handle_active_leaves_update( fetch_ancestry(ctx, &mut temp_header_cache, hash, allowed_ancestry_len).await?; let prev_fragment_chains = - ancestry.first().and_then(|prev_leaf| view.active_leaves.get(&prev_leaf.hash)); + ancestry.first().and_then(|prev_leaf| view.get_fragment_chains(&prev_leaf.hash)); let mut fragment_chains = HashMap::new(); for para in scheduled_paras { @@ -305,8 +334,7 @@ async fn handle_active_leaves_update( }; // Get the candidate storage of the parent leaf, if present. 
- let prev_fragment_chain = - prev_fragment_chains.and_then(|chains| chains.fragment_chains.get(¶)); + let prev_fragment_chain = prev_fragment_chains.and_then(|chains| chains.get(¶)); if let Some(prev_fragment_chain) = prev_fragment_chain { // Add old candidates to the new storage only after we added the pending @@ -362,17 +390,45 @@ async fn handle_active_leaves_update( fragment_chains.insert(para, chain); } - view.active_leaves.insert(hash, RelayBlockViewData { fragment_chains }); + let min_relay_parent = ancestry.last().map_or(block_info.number, |rp| rp.number); + + view.active_leaves + .insert(hash, RelayBlockViewData { fragment_chains, number: block_info.number }); + + view.min_relay_parent = view + .min_relay_parent + .map(|min_rp| std::cmp::min(min_rp, min_relay_parent)) + .or(Some(min_relay_parent)); } - for deactivated in &update.deactivated { - view.active_leaves.remove(deactivated); + // Delete the already inactive leaves that have now went out of scope. + view.implicit_leaves.retain(|_, implicit_leaf| { + matches!( + view.min_relay_parent, + Some(min_relay_parent) if implicit_leaf.number >= min_relay_parent + ) + }); + + for deactivated_leaf in update.deactivated { + // First, remove the leaves from the active set. + let Some(removed) = view.active_leaves.remove(&deactivated_leaf) else { continue }; + // Add the leaf to the implicit view, if the relay parent is still in scope. + if matches!(view.min_relay_parent, Some(min_relay_parent) if removed.number >= min_relay_parent) + { + view.implicit_leaves.insert( + deactivated_leaf, + RelayBlockViewData { + fragment_chains: removed.fragment_chains, + number: removed.number, + }, + ); + } } if metrics.0.is_some() { let mut connected = 0; let mut unconnected = 0; - for RelayBlockViewData { fragment_chains } in view.active_leaves.values() { + for RelayBlockViewData { fragment_chains, .. 
} in view.active_leaves.values() { for chain in fragment_chains.values() { connected += chain.best_chain_len(); unconnected += chain.unconnected_len(); @@ -380,8 +436,20 @@ async fn handle_active_leaves_update( } metrics.record_candidate_count(connected as u64, unconnected as u64); + + let mut candidates_in_implicit_view = 0; + for RelayBlockViewData { fragment_chains, .. } in view.implicit_leaves.values() { + for chain in fragment_chains.values() { + candidates_in_implicit_view += chain.best_chain_len(); + candidates_in_implicit_view += chain.unconnected_len(); + } + } + + metrics.record_candidate_count_in_implicit_view(candidates_in_implicit_view as u64); } + metrics.record_leaves_count(view.active_leaves.len() as u64, view.implicit_leaves.len() as u64); + Ok(()) } diff --git a/polkadot/node/core/prospective-parachains/src/metrics.rs b/polkadot/node/core/prospective-parachains/src/metrics.rs index 5708e0b54308..4faeeee0a184 100644 --- a/polkadot/node/core/prospective-parachains/src/metrics.rs +++ b/polkadot/node/core/prospective-parachains/src/metrics.rs @@ -17,7 +17,7 @@ use polkadot_node_subsystem::prometheus::Opts; use polkadot_node_subsystem_util::metrics::{ self, - prometheus::{self, GaugeVec, U64}, + prometheus::{self, Gauge, GaugeVec, U64}, }; #[derive(Clone)] @@ -27,6 +27,8 @@ pub(crate) struct MetricsInner { time_candidate_backed: prometheus::Histogram, time_hypothetical_membership: prometheus::Histogram, candidate_count: prometheus::GaugeVec, + active_leaves_count: prometheus::GaugeVec, + implicit_view_candidate_count: prometheus::Gauge, } /// Candidate backing metrics. @@ -68,14 +70,26 @@ impl Metrics { /// candidates count, second param is the unconnected candidates count. 
pub fn record_candidate_count(&self, connected_count: u64, unconnected_count: u64) { self.0.as_ref().map(|metrics| { - metrics.candidate_count.with_label_values(&["connected"]).set(connected_count) - }); - - self.0.as_ref().map(|metrics| { + metrics.candidate_count.with_label_values(&["connected"]).set(connected_count); metrics .candidate_count .with_label_values(&["unconnected"]) - .set(unconnected_count) + .set(unconnected_count); + }); + } + + /// Record the number of candidates present in the implicit view of the subsystem. + pub fn record_candidate_count_in_implicit_view(&self, count: u64) { + self.0.as_ref().map(|metrics| { + metrics.implicit_view_candidate_count.set(count); + }); + } + + /// Record the number of active/inactive leaves kept by the subsystem. + pub fn record_leaves_count(&self, active_count: u64, inactive_count: u64) { + self.0.as_ref().map(|metrics| { + metrics.active_leaves_count.with_label_values(&["active"]).set(active_count); + metrics.active_leaves_count.with_label_values(&["inactive"]).set(inactive_count); }); } } @@ -121,6 +135,23 @@ impl metrics::Metrics for Metrics { )?, registry, )?, + active_leaves_count: prometheus::register( + GaugeVec::new( + Opts::new( + "polkadot_parachain_prospective_parachains_active_leaves_count", + "Number of leaves kept by the subsystem, split by active/inactive" + ), + &["type"], + )?, + registry, + )?, + implicit_view_candidate_count: prometheus::register( + Gauge::new( + "polkadot_parachain_prospective_parachains_implicit_view_candidate_count", + "Number of candidates present in the implicit view" + )?, + registry + )?, }; Ok(Metrics(Some(metrics))) } From 90b69832b3cf585766009f8a14db7f52ce43ac14 Mon Sep 17 00:00:00 2001 From: alindima Date: Fri, 2 Aug 2024 16:23:51 +0300 Subject: [PATCH 40/56] log active leaf updates --- polkadot/node/core/prospective-parachains/src/lib.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs 
b/polkadot/node/core/prospective-parachains/src/lib.rs index 3db9ddf860d7..08fbd1d44add 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -208,6 +208,13 @@ async fn handle_active_leaves_update( let _timer = metrics.time_handle_active_leaves_update(); + gum::trace!( + target: LOG_TARGET, + activated = ?update.activated, + deactivated = ?update.deactivated, + "Handle ActiveLeavesUpdate" + ); + let mut temp_header_cache = HashMap::new(); // There can only be one newly activated leaf, `update.activated` is an `Option`. for activated in update.activated.into_iter() { From 797e9b655c18f8cf1880ce96d89fddd2a7d31b17 Mon Sep 17 00:00:00 2001 From: alindima Date: Fri, 2 Aug 2024 17:20:09 +0300 Subject: [PATCH 41/56] add test for bugfix --- .../core/prospective-parachains/src/tests.rs | 188 +++++++++++------- 1 file changed, 115 insertions(+), 73 deletions(-) diff --git a/polkadot/node/core/prospective-parachains/src/tests.rs b/polkadot/node/core/prospective-parachains/src/tests.rs index 7935207bf6d7..b3adc30471aa 100644 --- a/polkadot/node/core/prospective-parachains/src/tests.rs +++ b/polkadot/node/core/prospective-parachains/src/tests.rs @@ -205,6 +205,32 @@ async fn activate_leaf( activate_leaf_with_params(virtual_overseer, leaf, test_state, ASYNC_BACKING_PARAMETERS).await; } +async fn activate_leaf_with_parent_hash_fn( + virtual_overseer: &mut VirtualOverseer, + leaf: &TestLeaf, + test_state: &TestState, + parent_hash_fn: impl Fn(Hash) -> Hash, +) { + let TestLeaf { number, hash, .. 
} = leaf; + + let activated = new_leaf(*hash, *number); + + virtual_overseer + .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work( + activated, + )))) + .await; + + handle_leaf_activation( + virtual_overseer, + leaf, + test_state, + ASYNC_BACKING_PARAMETERS, + parent_hash_fn, + ) + .await; +} + async fn activate_leaf_with_params( virtual_overseer: &mut VirtualOverseer, leaf: &TestLeaf, @@ -221,7 +247,14 @@ async fn activate_leaf_with_params( )))) .await; - handle_leaf_activation(virtual_overseer, leaf, test_state, async_backing_params).await; + handle_leaf_activation( + virtual_overseer, + leaf, + test_state, + async_backing_params, + get_parent_hash, + ) + .await; } async fn handle_leaf_activation( @@ -229,6 +262,7 @@ async fn handle_leaf_activation( leaf: &TestLeaf, test_state: &TestState, async_backing_params: AsyncBackingParams, + parent_hash_fn: impl Fn(Hash) -> Hash, ) { let TestLeaf { number, hash, para_data } = leaf; @@ -283,7 +317,7 @@ async fn handle_leaf_activation( let min_min = para_data.iter().map(|(_, data)| data.min_relay_parent).min().unwrap_or(*number); let ancestry_len = number - min_min; let ancestry_hashes: Vec = - std::iter::successors(Some(*hash), |h| Some(get_parent_hash(*h))) + std::iter::successors(Some(*hash), |h| Some(parent_hash_fn(*h))) .skip(1) .take(ancestry_len as usize) .collect(); @@ -523,6 +557,26 @@ async fn get_pvd( assert_eq!(resp, expected_pvd); } +macro_rules! make_and_back_candidate { + ($test_state:ident, $virtual_overseer:ident, $leaf:ident, $parent:expr, $index:expr) => {{ + let (mut candidate, pvd) = make_candidate( + $leaf.hash, + $leaf.number, + 1.into(), + $parent.commitments.head_data.clone(), + HeadData(vec![$index]), + $test_state.validation_code_hash, + ); + // Set a field to make this candidate unique. 
+ candidate.descriptor.para_head = Hash::from_low_u64_le($index); + let candidate_hash = candidate.hash(); + introduce_seconded_candidate(&mut $virtual_overseer, candidate.clone(), pvd).await; + back_candidate(&mut $virtual_overseer, &candidate, candidate_hash).await; + + (candidate, candidate_hash) + }}; +} + #[test] fn should_do_no_work_if_async_backing_disabled_for_leaf() { async fn activate_leaf_async_backing_disabled(virtual_overseer: &mut VirtualOverseer) { @@ -1428,26 +1482,6 @@ fn check_backable_query_single_candidate() { // Backs some candidates and tests `GetBackableCandidates` when requesting a multiple candidates. #[test] fn check_backable_query_multiple_candidates() { - macro_rules! make_and_back_candidate { - ($test_state:ident, $virtual_overseer:ident, $leaf:ident, $parent:expr, $index:expr) => {{ - let (mut candidate, pvd) = make_candidate( - $leaf.hash, - $leaf.number, - 1.into(), - $parent.commitments.head_data.clone(), - HeadData(vec![$index]), - $test_state.validation_code_hash, - ); - // Set a field to make this candidate unique. 
- candidate.descriptor.para_head = Hash::from_low_u64_le($index); - let candidate_hash = candidate.hash(); - introduce_seconded_candidate(&mut $virtual_overseer, candidate.clone(), pvd).await; - back_candidate(&mut $virtual_overseer, &candidate, candidate_hash).await; - - (candidate, candidate_hash) - }}; - } - let test_state = TestState::default(); let view = test_harness(|mut virtual_overseer| async move { // Leaf A @@ -2086,6 +2120,7 @@ fn correctly_updates_leaves(#[case] runtime_api_version: u32) { &leaf_c, &test_state, ASYNC_BACKING_PARAMETERS, + get_parent_hash, ) .await; @@ -2152,46 +2187,23 @@ fn handle_active_leaves_update_gets_candidates_from_parent() { HeadData(vec![1]), test_state.validation_code_hash, ); - let (candidate_b, pvd_b) = make_candidate( - leaf_a.hash, - leaf_a.number, - para_id, - HeadData(vec![1]), - HeadData(vec![2]), - test_state.validation_code_hash, - ); - let (candidate_c, pvd_c) = make_candidate( - leaf_a.hash, - leaf_a.number, - para_id, - HeadData(vec![2]), - HeadData(vec![3]), - test_state.validation_code_hash, - ); - let (candidate_d, pvd_d) = make_candidate( - leaf_a.hash, - leaf_a.number, - para_id, - HeadData(vec![3]), - HeadData(vec![4]), - test_state.validation_code_hash, - ); + let candidate_hash_a = candidate_a.hash(); + introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await; + back_candidate(&mut virtual_overseer, &candidate_a, candidate_hash_a).await; - // Introduce candidates. 
- introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a.clone()) - .await; - introduce_seconded_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b.clone()) - .await; - introduce_seconded_candidate(&mut virtual_overseer, candidate_c.clone(), pvd_c.clone()) - .await; - introduce_seconded_candidate(&mut virtual_overseer, candidate_d.clone(), pvd_d.clone()) - .await; + let (candidate_b, candidate_hash_b) = + make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_a, 2); + let (candidate_c, candidate_hash_c) = + make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_b, 3); + let (candidate_d, candidate_hash_d) = + make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_c, 4); - // Back candidates. Otherwise, we cannot check membership with GetBackableCandidates. - back_candidate(&mut virtual_overseer, &candidate_a, candidate_a.hash()).await; - back_candidate(&mut virtual_overseer, &candidate_b, candidate_b.hash()).await; - back_candidate(&mut virtual_overseer, &candidate_c, candidate_c.hash()).await; - back_candidate(&mut virtual_overseer, &candidate_d, candidate_d.hash()).await; + let all_candidates_resp = vec![ + (candidate_hash_a, leaf_a.hash), + (candidate_hash_b, leaf_a.hash), + (candidate_hash_c, leaf_a.hash), + (candidate_hash_d, leaf_a.hash), + ]; // Check candidate tree membership. 
get_backable_candidates( @@ -2200,12 +2212,7 @@ fn handle_active_leaves_update_gets_candidates_from_parent() { para_id, Ancestors::default(), 5, - vec![ - (candidate_a.hash(), leaf_a.hash), - (candidate_b.hash(), leaf_a.hash), - (candidate_c.hash(), leaf_a.hash), - (candidate_d.hash(), leaf_a.hash), - ], + all_candidates_resp.clone(), ) .await; @@ -2276,12 +2283,7 @@ fn handle_active_leaves_update_gets_candidates_from_parent() { para_id, Ancestors::default(), 5, - vec![ - (candidate_a.hash(), leaf_a.hash), - (candidate_b.hash(), leaf_a.hash), - (candidate_c.hash(), leaf_a.hash), - (candidate_d.hash(), leaf_a.hash), - ], + all_candidates_resp.clone(), ) .await; @@ -2308,10 +2310,50 @@ fn handle_active_leaves_update_gets_candidates_from_parent() { ) .await; + // Now add leaf C, which will be a sibling (fork) of leaf B. It should also inherit the + // candidates of leaf A (their common parent). + let leaf_c = TestLeaf { + number: 101, + hash: Hash::from_low_u64_be(12), + para_data: vec![( + para_id, + PerParaData::new_with_pending(98, HeadData(vec![1, 2, 3]), vec![]), + )], + }; + + activate_leaf_with_parent_hash_fn(&mut virtual_overseer, &leaf_c, &test_state, |hash| { + if hash == leaf_c.hash { + leaf_a.hash + } else { + get_parent_hash(hash) + } + }) + .await; + + get_backable_candidates( + &mut virtual_overseer, + &leaf_b, + para_id, + [candidate_a.hash(), candidate_b.hash()].into_iter().collect(), + 5, + vec![(candidate_c.hash(), leaf_a.hash), (candidate_d.hash(), leaf_a.hash)], + ) + .await; + + get_backable_candidates( + &mut virtual_overseer, + &leaf_c, + para_id, + Ancestors::new(), + 5, + all_candidates_resp.clone(), + ) + .await; + virtual_overseer }); - assert_eq!(view.active_leaves.len(), 1); + assert_eq!(view.active_leaves.len(), 2); } #[test] From b5eaf54ff7c1e25ef5b9f9aaa3e71fc648bf34d0 Mon Sep 17 00:00:00 2001 From: alindima Date: Mon, 5 Aug 2024 15:33:55 +0300 Subject: [PATCH 42/56] use backing implicit view in prospective-parachains --- 
.../core/prospective-parachains/src/lib.rs | 209 +++++++++--------- .../src/backing_implicit_view.rs | 76 +++++++ 2 files changed, 179 insertions(+), 106 deletions(-) diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index 08fbd1d44add..6a7723d1e1ad 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -43,6 +43,7 @@ use polkadot_node_subsystem::{ overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError, }; use polkadot_node_subsystem_util::{ + backing_implicit_view::{BlockInfoProspectiveParachains as BlockInfo, View as ImplicitView}, inclusion_emulator::{Constraints, RelayChainBlockInfo}, request_session_index_for_child, runtime::{prospective_parachains_mode, ProspectiveParachainsMode}, @@ -74,43 +75,31 @@ const LOG_TARGET: &str = "parachain::prospective-parachains"; struct RelayBlockViewData { // The fragment chains for current and upcoming scheduled paras. fragment_chains: HashMap, - // The relay parent number of this leaf. - number: u32, } struct View { - // Active relay-chain blocks by block hash. - active_leaves: HashMap, - // Inactive leaves that are still in scope. - implicit_leaves: HashMap, - // The minimum relay parent number allowed as ancestry across all active leaves. All implicit - // and active leaves have a higher relay parent number. - // `None` if it wasn't yet computed. This can be a bit wasteful, as there may be multiple relay - // parents at this height. We could keep track of the paths to each active leaf to optimize - // this. However, the complexity isn't justified. - min_relay_parent: Option, + // Per relay parent fragment chains. These includes all relay parents under the implicit view. + per_relay_parent: HashMap, + // The hashes of the currently active leaves. This is a subset of the keys in + // `per_relay_parent`. 
+ active_leaves: HashSet, + // The backing implicit view. + implicit_view: ImplicitView, } impl View { // Initialize with empty values. fn new() -> Self { View { - active_leaves: HashMap::new(), - implicit_leaves: HashMap::new(), - min_relay_parent: None, + per_relay_parent: HashMap::new(), + active_leaves: HashSet::new(), + implicit_view: ImplicitView::default(), } } - // Get the fragment chains of this leaf (active or inactive). + // Get the fragment chains of this leaf. fn get_fragment_chains(&self, leaf: &Hash) -> Option<&HashMap> { - self.active_leaves - .get(&leaf) - .map(|view_data| &view_data.fragment_chains) - .or_else(|| { - self.implicit_leaves - .get(leaf) - .map(|inactive_leaf| &inactive_leaf.fragment_chains) - }) + self.per_relay_parent.get(&leaf).map(|view_data| &view_data.fragment_chains) } } @@ -196,15 +185,15 @@ async fn handle_active_leaves_update( update: ActiveLeavesUpdate, metrics: &Metrics, ) -> JfyiErrorResult<()> { - // For any active leaf: + // For any new active leaf: // - determine the scheduled paras // - pre-populate the candidate storage with pending availability candidates and candidates from // the parent leaf // - populate the fragment chain + // - add it to the implicit view // - // Clean up the leaves from the implicit view that have went out of scope. - // Process the newly-deactivated leaves. Remove them from the active leaves set and keep them as - // implicit leaves if their relay parent is still in scope. + // Then mark the newly-deactivated leaves as deactivated and update the implicit view. + // Finally, remove any relay parents that are no longer part of the implicit view. let _timer = metrics.time_handle_active_leaves_update(); @@ -242,22 +231,21 @@ async fn handle_active_leaves_update( let scheduled_paras = fetch_upcoming_paras(ctx, hash).await?; - let block_info: RelayChainBlockInfo = - match fetch_block_info(ctx, &mut temp_header_cache, hash).await? 
{ - None => { - gum::warn!( - target: LOG_TARGET, - block_hash = ?hash, - "Failed to get block info for newly activated leaf block." - ); + let block_info = match fetch_block_info(ctx, &mut temp_header_cache, hash).await? { + None => { + gum::warn!( + target: LOG_TARGET, + block_hash = ?hash, + "Failed to get block info for newly activated leaf block." + ); - // `update.activated` is an option, but we can use this - // to exit the 'loop' and skip this block without skipping - // pruning logic. - continue - }, - Some(info) => info, - }; + // `update.activated` is an option, but we can use this + // to exit the 'loop' and skip this block without skipping + // pruning logic. + continue + }, + Some(info) => info, + }; let ancestry = fetch_ancestry(ctx, &mut temp_header_cache, hash, allowed_ancestry_len).await?; @@ -319,11 +307,14 @@ async fn handle_active_leaves_update( } let scope = match FragmentChainScope::with_ancestors( - block_info.clone(), + block_info.clone().into(), constraints, compact_pending, max_candidate_depth, - ancestry.iter().cloned(), + ancestry + .iter() + .map(|a| RelayChainBlockInfo::from(a.clone())) + .collect::>(), ) { Ok(scope) => scope, Err(unexpected_ancestors) => { @@ -397,65 +388,52 @@ async fn handle_active_leaves_update( fragment_chains.insert(para, chain); } - let min_relay_parent = ancestry.last().map_or(block_info.number, |rp| rp.number); + view.per_relay_parent.insert(hash, RelayBlockViewData { fragment_chains }); - view.active_leaves - .insert(hash, RelayBlockViewData { fragment_chains, number: block_info.number }); + view.active_leaves.insert(hash); - view.min_relay_parent = view - .min_relay_parent - .map(|min_rp| std::cmp::min(min_rp, min_relay_parent)) - .or(Some(min_relay_parent)); + view.implicit_view + .activate_leaf_from_prospective_parachains(block_info, &ancestry); } - // Delete the already inactive leaves that have now went out of scope. 
- view.implicit_leaves.retain(|_, implicit_leaf| { - matches!( - view.min_relay_parent, - Some(min_relay_parent) if implicit_leaf.number >= min_relay_parent - ) - }); - - for deactivated_leaf in update.deactivated { - // First, remove the leaves from the active set. - let Some(removed) = view.active_leaves.remove(&deactivated_leaf) else { continue }; - // Add the leaf to the implicit view, if the relay parent is still in scope. - if matches!(view.min_relay_parent, Some(min_relay_parent) if removed.number >= min_relay_parent) - { - view.implicit_leaves.insert( - deactivated_leaf, - RelayBlockViewData { - fragment_chains: removed.fragment_chains, - number: removed.number, - }, - ); - } + for deactivated in update.deactivated { + view.active_leaves.remove(&deactivated); + view.implicit_view.deactivate_leaf(deactivated); } - if metrics.0.is_some() { - let mut connected = 0; - let mut unconnected = 0; - for RelayBlockViewData { fragment_chains, .. } in view.active_leaves.values() { - for chain in fragment_chains.values() { - connected += chain.best_chain_len(); - unconnected += chain.unconnected_len(); - } - } + { + let remaining: HashSet<_> = view.implicit_view.all_allowed_relay_parents().collect(); - metrics.record_candidate_count(connected as u64, unconnected as u64); + view.per_relay_parent.retain(|r, _| remaining.contains(&r)); + } + if metrics.0.is_some() { + let mut active_connected = 0; + let mut active_unconnected = 0; let mut candidates_in_implicit_view = 0; - for RelayBlockViewData { fragment_chains, .. } in view.implicit_leaves.values() { - for chain in fragment_chains.values() { - candidates_in_implicit_view += chain.best_chain_len(); - candidates_in_implicit_view += chain.unconnected_len(); + + for (hash, RelayBlockViewData { fragment_chains, .. 
}) in view.per_relay_parent.iter() { + if view.active_leaves.contains(hash) { + for chain in fragment_chains.values() { + active_connected += chain.best_chain_len(); + active_unconnected += chain.unconnected_len(); + } + } else { + for chain in fragment_chains.values() { + candidates_in_implicit_view += chain.best_chain_len(); + candidates_in_implicit_view += chain.unconnected_len(); + } } } + metrics.record_candidate_count(active_connected as u64, active_unconnected as u64); metrics.record_candidate_count_in_implicit_view(candidates_in_implicit_view as u64); } - metrics.record_leaves_count(view.active_leaves.len() as u64, view.implicit_leaves.len() as u64); + let num_active_leaves = view.active_leaves.len() as u64; + let num_inactive_leaves = + (view.per_relay_parent.len() as u64).saturating_sub(num_active_leaves); + metrics.record_leaves_count(num_active_leaves, num_inactive_leaves); Ok(()) } @@ -508,7 +486,7 @@ async fn preprocess_candidates_pending_availability( }, compact: fragment_chain::PendingAvailability { candidate_hash: pending.candidate_hash, - relay_parent, + relay_parent: relay_parent.into(), }, }); @@ -550,7 +528,9 @@ async fn handle_introduce_seconded_candidate( let mut added = false; let mut para_scheduled = false; - for (leaf, leaf_data) in view.active_leaves.iter_mut() { + for leaf in view.active_leaves.iter() { + let Some(leaf_data) = view.per_relay_parent.get_mut(leaf) else { continue }; + if let Some(chain) = leaf_data.fragment_chains.get_mut(¶) { para_scheduled = true; @@ -620,7 +600,9 @@ async fn handle_candidate_backed( let mut found_candidate = false; let mut found_para = false; - for (leaf, leaf_data) in view.active_leaves.iter_mut() { + for leaf in view.active_leaves.iter() { + let Some(leaf_data) = view.per_relay_parent.get_mut(leaf) else { continue }; + if let Some(chain) = leaf_data.fragment_chains.get_mut(¶) { found_para = true; if chain.is_candidate_backed(&candidate_hash) { @@ -691,7 +673,7 @@ fn answer_get_backable_candidates( 
ancestors: Ancestors, tx: oneshot::Sender>, ) { - let Some(data) = view.active_leaves.get(&relay_parent) else { + if !view.active_leaves.contains(&relay_parent) { gum::debug!( target: LOG_TARGET, ?relay_parent, @@ -699,6 +681,17 @@ fn answer_get_backable_candidates( "Requested backable candidate for inactive relay-parent." ); + let _ = tx.send(vec![]); + return + } + let Some(data) = view.per_relay_parent.get(&relay_parent) else { + gum::debug!( + target: LOG_TARGET, + ?relay_parent, + para_id = ?para, + "Requested backable candidate for inexistent relay-parent." + ); + let _ = tx.send(vec![]); return }; @@ -768,11 +761,12 @@ fn answer_hypothetical_membership_request( } let required_active_leaf = request.fragment_chain_relay_parent; - for (active_leaf, leaf_view) in view + for active_leaf in view .active_leaves .iter() - .filter(|(h, _)| required_active_leaf.as_ref().map_or(true, |x| h == &x)) + .filter(|h| required_active_leaf.as_ref().map_or(true, |x| h == &x)) { + let Some(leaf_view) = view.per_relay_parent.get(&active_leaf) else { continue }; for &mut (ref candidate, ref mut membership) in &mut response { let para_id = &candidate.candidate_para(); let Some(fragment_chain) = leaf_view.fragment_chains.get(para_id) else { continue }; @@ -805,9 +799,11 @@ fn answer_minimum_relay_parents_request( tx: oneshot::Sender>, ) { let mut v = Vec::new(); - if let Some(leaf_data) = view.active_leaves.get(&relay_parent) { - for (para_id, fragment_chain) in &leaf_data.fragment_chains { - v.push((*para_id, fragment_chain.scope().earliest_relay_parent().number)); + if view.active_leaves.contains(&relay_parent) { + if let Some(leaf_data) = view.per_relay_parent.get(&relay_parent) { + for (para_id, fragment_chain) in &leaf_data.fragment_chains { + v.push((*para_id, fragment_chain.scope().earliest_relay_parent().number)); + } } } @@ -829,11 +825,11 @@ fn answer_prospective_validation_data_request( let mut relay_parent_info = None; let mut max_pov_size = None; - for fragment_chain in 
view - .active_leaves - .values() - .filter_map(|x| x.fragment_chains.get(&request.para_id)) - { + for fragment_chain in view.active_leaves.iter().filter_map(|x| { + view.per_relay_parent + .get(&x) + .and_then(|data| data.fragment_chains.get(&request.para_id)) + }) { if head_data.is_some() && relay_parent_info.is_some() && max_pov_size.is_some() { break } @@ -944,7 +940,7 @@ async fn fetch_ancestry( cache: &mut HashMap, relay_hash: Hash, ancestors: usize, -) -> JfyiErrorResult> { +) -> JfyiErrorResult> { if ancestors == 0 { return Ok(Vec::new()) } @@ -1023,12 +1019,13 @@ async fn fetch_block_info( ctx: &mut Context, cache: &mut HashMap, relay_hash: Hash, -) -> JfyiErrorResult> { +) -> JfyiErrorResult> { let header = fetch_block_header_with_cache(ctx, cache, relay_hash).await?; - Ok(header.map(|header| RelayChainBlockInfo { + Ok(header.map(|header| BlockInfo { hash: relay_hash, number: header.number, + parent_hash: header.parent_hash, storage_root: header.state_root, })) } diff --git a/polkadot/node/subsystem-util/src/backing_implicit_view.rs b/polkadot/node/subsystem-util/src/backing_implicit_view.rs index 23a758d25715..a805ef8165e5 100644 --- a/polkadot/node/subsystem-util/src/backing_implicit_view.rs +++ b/polkadot/node/subsystem-util/src/backing_implicit_view.rs @@ -25,6 +25,7 @@ use polkadot_primitives::{BlockNumber, Hash, Id as ParaId}; use std::collections::HashMap; use crate::{ + inclusion_emulator::RelayChainBlockInfo, request_session_index_for_child, runtime::{self, prospective_parachains_mode, recv_runtime, ProspectiveParachainsMode}, }; @@ -121,6 +122,26 @@ struct BlockInfo { parent_hash: Hash, } +/// Information about a relay-chain block, to be used when calling this module from prospective +/// parachains. +#[derive(Debug, Clone, PartialEq)] +pub struct BlockInfoProspectiveParachains { + /// The hash of the relay-chain block. + pub hash: Hash, + /// The hash of the parent relay-chain block. 
+ pub parent_hash: Hash, + /// The number of the relay-chain block. + pub number: BlockNumber, + /// The storage-root of the relay-chain block. + pub storage_root: Hash, +} + +impl From for RelayChainBlockInfo { + fn from(value: BlockInfoProspectiveParachains) -> Self { + Self { hash: value.hash, number: value.number, storage_root: value.storage_root } + } +} + impl View { /// Get an iterator over active leaves in the view. pub fn leaves(&self) -> impl Iterator { @@ -178,6 +199,61 @@ impl View { } } + /// Activate a leaf in the view. To be used by the prospective parachains subsystem. + /// + /// This will not request any additional data, as prospective parachains already provides all + /// the required info. + /// NOTE: using `activate_leaf` instead of this function will result in a + /// deadlock, as it calls prospective-parachains under the hood. + /// + /// No-op for known leaves. + pub fn activate_leaf_from_prospective_parachains( + &mut self, + leaf: BlockInfoProspectiveParachains, + ancestors: &[BlockInfoProspectiveParachains], + ) { + if self.leaves.contains_key(&leaf.hash) { + return + } + + // Retain at least `MINIMUM_RETAIN_LENGTH` blocks in storage. + // This helps to avoid Chain API calls when activating leaves in the + // same chain. + let retain_minimum = std::cmp::min( + ancestors.last().map(|a| a.number).unwrap_or(0), + leaf.number.saturating_sub(MINIMUM_RETAIN_LENGTH), + ); + + self.leaves.insert(leaf.hash, ActiveLeafPruningInfo { retain_minimum }); + let mut allowed_relay_parents = AllowedRelayParents { + allowed_relay_parents_contiguous: Vec::with_capacity(ancestors.len()), + // In this case, initialise this to an empty map, as prospective parachains already has + // this data and it won't query the implicit view for it. 
+ minimum_relay_parents: HashMap::new(), + }; + + for ancestor in ancestors { + self.block_info_storage.insert( + ancestor.hash, + BlockInfo { + block_number: ancestor.number, + maybe_allowed_relay_parents: None, + parent_hash: ancestor.parent_hash, + }, + ); + allowed_relay_parents.allowed_relay_parents_contiguous.push(ancestor.hash); + } + + self.block_info_storage.insert( + leaf.hash, + BlockInfo { + block_number: leaf.number, + maybe_allowed_relay_parents: Some(allowed_relay_parents), + parent_hash: leaf.parent_hash, + }, + ); + } + /// Deactivate a leaf in the view. This prunes any outdated implicit ancestors as well. /// /// Returns hashes of blocks pruned from storage. From 5166663d8bf2569d97eb4df7f11aba6cf6e580d9 Mon Sep 17 00:00:00 2001 From: alindima Date: Wed, 7 Aug 2024 11:29:08 +0300 Subject: [PATCH 43/56] update some comments --- .../core/prospective-parachains/src/fragment_chain/mod.rs | 5 +++-- polkadot/node/core/prospective-parachains/src/lib.rs | 2 -- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs index d24fc99237b9..c0cedc0cef16 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs @@ -173,7 +173,8 @@ pub(crate) enum Error { } /// The rule for selecting between two backed candidate forks, when adding to the chain. -/// All validators should adhere to this rule. +/// All validators should adhere to this rule, in order to not lose out on rewards in case of +/// forking parachains. fn fork_selection_rule(hash1: &CandidateHash, hash2: &CandidateHash) -> Ordering { hash1.cmp(hash2) } @@ -1014,7 +1015,7 @@ impl FragmentChain { } // Once the backable chain was populated, trim the forks generated by candidates which - // are not present in the best chain. Fan out this into a full breadth-first search. 
+ // are not present in the best chain. Fan this out into a full breadth-first search. fn trim_uneligible_forks(&self, storage: &mut CandidateStorage) { // Start out with the candidates in the chain. They are all valid candidates. let mut queue: VecDeque<_> = diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index 6a7723d1e1ad..714a30a163b9 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -349,8 +349,6 @@ async fn handle_active_leaves_update( .get_pending_availability(&candidate.hash()) .is_none() { - // We need to swallow any potential errors here, as they can happen under - // normal operation, with candidates becoming out of scope for example. let _ = new_storage.add_candidate_entry(candidate); } } From 9a4b88dc328735dd0dc23e0180d2a01f92e9f35d Mon Sep 17 00:00:00 2001 From: alindima Date: Wed, 7 Aug 2024 12:55:06 +0300 Subject: [PATCH 44/56] address some comments --- .../src/fragment_chain/mod.rs | 44 +++++++++++++------ .../src/fragment_chain/tests.rs | 26 +++++------ .../core/prospective-parachains/src/lib.rs | 22 +++++----- 3 files changed, 53 insertions(+), 39 deletions(-) diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs index c0cedc0cef16..5e60b702017b 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs @@ -254,16 +254,13 @@ impl CandidateStorage { } } - /// Note that an existing candidate has been backed. Return false if the candidate was not - /// found. - fn mark_backed(&mut self, candidate_hash: &CandidateHash) -> bool { + /// Note that an existing candidate has been backed. 
+ fn mark_backed(&mut self, candidate_hash: &CandidateHash) { if let Some(entry) = self.by_candidate_hash.get_mut(candidate_hash) { gum::trace!(target: LOG_TARGET, ?candidate_hash, "Candidate marked as backed"); entry.state = CandidateState::Backed; - true } else { gum::trace!(target: LOG_TARGET, ?candidate_hash, "Candidate not found while marking as backed"); - false } } @@ -559,6 +556,7 @@ impl Scope { } } +#[cfg_attr(test, derive(Clone))] pub struct FragmentNode { fragment: Fragment, candidate_hash: CandidateHash, @@ -576,6 +574,7 @@ impl FragmentNode { /// A candidate chain of backed/backable candidates. /// Includes the candidates pending availability and candidates which may be backed on-chain. #[derive(Default)] +#[cfg_attr(test, derive(Clone))] struct BackedChain { // Holds the candidate chain. chain: Vec, @@ -609,6 +608,7 @@ impl BackedChain { /// It holds the current best backable candidate chain, as well as potential candidates /// which could become connected to the chain in the future or which could even overwrite the /// existing chain. +#[cfg_attr(test, derive(Clone))] pub(crate) struct FragmentChain { // The current scope, which dictates the on-chain operating constraints that all future // candidates must adhere to. @@ -708,6 +708,25 @@ impl FragmentChain { storage } + /// Consume `self` and return the [`Scope`] and a new [`CandidateStorage`] containing all the + /// candidates from this `FragmentChain`, as well as the unconnected ones. 
+ pub fn into_candidate_storage_and_scope(self) -> (CandidateStorage, Scope) { + let mut storage = self.unconnected; + + for candidate in self.best_chain.chain { + let _ = storage.add_candidate_entry(CandidateEntry { + candidate_hash: candidate.candidate_hash, + parent_head_data_hash: candidate.parent_head_data_hash, + output_head_data_hash: candidate.output_head_data_hash, + relay_parent: candidate.relay_parent(), + candidate: candidate.fragment.candidate_clone(), + state: CandidateState::Backed, + }); + } + + (storage, self.scope) + } + /// Try getting the full head data associated with this hash. pub fn get_head_data_by_hash(&self, head_data_hash: &Hash) -> Option { // First, see if this is the head data of the latest included candidate. @@ -1225,18 +1244,15 @@ impl FragmentChain { } } - /// Mark a candidate as backed. Return `None` if the candidate is not part of the unconnected - /// storage. - /// This will trigger a recreation of the best backable chain. - pub fn candidate_backed(&self, newly_backed_candidate: &CandidateHash) -> Option { + /// Mark a candidate as backed. The caller should make sure that the candidate is present in the + /// unconnected storage. This will trigger a recreation of the best backable chain. + pub fn candidate_backed(self, newly_backed_candidate: &CandidateHash) -> Self { // Get the storage containing both the backable chain and the unconnected candidates. - let mut old_storage = self.as_candidate_storage(); + let (mut old_storage, scope) = self.into_candidate_storage_and_scope(); - if !old_storage.mark_backed(newly_backed_candidate) { - return None - } + old_storage.mark_backed(newly_backed_candidate); // Repopulate. 
- Some(Self::populate(self.scope.clone(), old_storage)) + Self::populate(scope, old_storage) } } diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs index 34cb9f2784a6..dfa363339565 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs @@ -278,7 +278,6 @@ fn candidate_storage_methods() { ); } assert!(!storage.contains(&candidate_hash)); - assert_eq!(storage.mark_backed(&candidate_hash), false); assert_eq!(storage.possible_backed_para_children(&parent_head_hash).count(), 0); assert_eq!(storage.head_data_by_hash(&candidate.descriptor.para_head), None); assert_eq!(storage.head_data_by_hash(&parent_head_hash), None); @@ -302,9 +301,9 @@ fn candidate_storage_methods() { assert_eq!(storage.head_data_by_hash(&parent_head_hash).unwrap(), &pvd.parent_head); // Now mark it as backed - assert_eq!(storage.mark_backed(&candidate_hash), true); + storage.mark_backed(&candidate_hash); // Marking it twice is fine. - assert_eq!(storage.mark_backed(&candidate_hash), true); + storage.mark_backed(&candidate_hash); assert_eq!( storage .possible_backed_para_children(&parent_head_hash) @@ -1146,7 +1145,7 @@ fn test_populate_and_check_potential() { // Simulate a best chain reorg by backing a2. { - let chain = chain.candidate_backed(&candidate_a2_hash).unwrap(); + let chain = chain.candidate_backed(&candidate_a2_hash); assert_eq!(chain.best_chain_vec(), vec![candidate_a2_hash, candidate_b2_hash]); // F is kept as it was truly unconnected. The rest will be trimmed. assert_eq!( @@ -1263,7 +1262,7 @@ fn test_populate_and_check_potential() { ); // Mark E as backed. F will be dropped for invalid watermark. No other unconnected candidates. 
- let chain = chain.candidate_backed(&candidate_e_hash).unwrap(); + let chain = chain.candidate_backed(&candidate_e_hash); assert_eq!(chain.best_chain_vec(), vec![candidate_d_hash, candidate_e_hash]); assert_eq!(chain.unconnected_len(), 0); @@ -1380,29 +1379,30 @@ fn test_find_ancestor_path_and_find_backable_chain() { // Do tests with only a couple of candidates being backed. { - let chain = chain.candidate_backed(&&candidates[5]).unwrap(); + let chain = chain.clone(); + let chain = chain.candidate_backed(&&candidates[5]); for count in 0..10 { assert_eq!(chain.find_backable_chain(Ancestors::new(), count).len(), 0); } - let chain = chain.candidate_backed(&&candidates[3]).unwrap(); - let chain = chain.candidate_backed(&&candidates[4]).unwrap(); + let chain = chain.candidate_backed(&&candidates[3]); + let chain = chain.candidate_backed(&&candidates[4]); for count in 0..10 { assert_eq!(chain.find_backable_chain(Ancestors::new(), count).len(), 0); } - let chain = chain.candidate_backed(&&candidates[1]).unwrap(); + let chain = chain.candidate_backed(&&candidates[1]); for count in 0..10 { assert_eq!(chain.find_backable_chain(Ancestors::new(), count).len(), 0); } - let chain = chain.candidate_backed(&&candidates[0]).unwrap(); + let chain = chain.candidate_backed(&&candidates[0]); assert_eq!(chain.find_backable_chain(Ancestors::new(), 1), hashes(0..1)); for count in 2..10 { assert_eq!(chain.find_backable_chain(Ancestors::new(), count), hashes(0..2)); } // Now back the missing piece. 
- let chain = chain.candidate_backed(&&candidates[2]).unwrap(); + let chain = chain.candidate_backed(&&candidates[2]); assert_eq!(chain.best_chain_len(), 6); for count in 0..10 { assert_eq!( @@ -1419,8 +1419,8 @@ fn test_find_ancestor_path_and_find_backable_chain() { let mut candidates_shuffled = candidates.clone(); candidates_shuffled.shuffle(&mut thread_rng()); for candidate in candidates.iter() { - chain = chain.candidate_backed(candidate).unwrap(); - assert!(storage.mark_backed(candidate)); + chain = chain.candidate_backed(candidate); + storage.mark_backed(candidate); } // No ancestors supplied. diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index 714a30a163b9..9cea6e064118 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -28,7 +28,7 @@ #![deny(unused_crate_dependencies)] -use std::collections::{HashMap, HashSet}; +use std::collections::{hash_map::Entry, HashMap, HashSet}; use fragment_chain::CandidateStorage; use futures::{channel::oneshot, prelude::*}; @@ -601,9 +601,9 @@ async fn handle_candidate_backed( for leaf in view.active_leaves.iter() { let Some(leaf_data) = view.per_relay_parent.get_mut(leaf) else { continue }; - if let Some(chain) = leaf_data.fragment_chains.get_mut(¶) { + if let Entry::Occupied(chain) = leaf_data.fragment_chains.entry(para) { found_para = true; - if chain.is_candidate_backed(&candidate_hash) { + if chain.get().is_candidate_backed(&candidate_hash) { gum::debug!( target: LOG_TARGET, para_id = ?para, @@ -611,17 +611,17 @@ async fn handle_candidate_backed( "Received redundant instruction to mark as backed an already backed candidate", ); found_candidate = true; - } else if chain.contains_unconnected_candidate(&candidate_hash) { + } else if chain.get().contains_unconnected_candidate(&candidate_hash) { found_candidate = true; - // Now that a candidate was backed, attempt to recreate the 
fragment chain. - let maybe_new_chain = chain.candidate_backed(&candidate_hash); + // Now that a candidate was backed, recreate the fragment chain. + let new_chain = chain.remove().candidate_backed(&candidate_hash); gum::trace!( target: LOG_TARGET, relay_parent = ?leaf, para_id = ?para, "Candidate backed. Candidate chain for para: {:?}", - maybe_new_chain.as_ref().unwrap_or(chain).best_chain_vec() + new_chain.best_chain_vec() ); gum::trace!( @@ -629,13 +629,11 @@ async fn handle_candidate_backed( relay_parent = ?leaf, para_id = ?para, "Potential candidate storage for para: {:?}", - maybe_new_chain.as_ref().unwrap_or(chain).unconnected().map(|candidate| candidate.hash()).collect::>() + new_chain.unconnected().map(|candidate| candidate.hash()).collect::>() ); - // Replace the old chain with the new one. - if let Some(new_chain) = maybe_new_chain { - *chain = new_chain; - } + // We removed the old chain, add the new one. + leaf_data.fragment_chains.insert(para, new_chain); } } } From 4ba7b0f5840a4fd11b2d2fbd42f92d642a19dd79 Mon Sep 17 00:00:00 2001 From: alindima Date: Wed, 7 Aug 2024 14:41:25 +0300 Subject: [PATCH 45/56] a bit of refactoring --- .../src/fragment_chain/mod.rs | 31 +++++++++++++------ .../src/fragment_chain/tests.rs | 7 +---- .../core/prospective-parachains/src/lib.rs | 15 ++------- 3 files changed, 24 insertions(+), 29 deletions(-) diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs index 5e60b702017b..4ec8551a7d73 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs @@ -690,19 +690,30 @@ impl FragmentChain { } /// Return a new [`CandidateStorage`] containing all the candidates from this `FragmentChain`, - /// as well as the unconnected ones. - pub fn as_candidate_storage(&self) -> CandidateStorage { + /// as well as the unconnected ones. 
This does not contain the candidates that used to be + /// pending availability. + pub fn advance_scope(&self) -> CandidateStorage { let mut storage = self.unconnected.clone(); for candidate in self.best_chain.chain.iter() { - let _ = storage.add_candidate_entry(CandidateEntry { - candidate_hash: candidate.candidate_hash, - parent_head_data_hash: candidate.parent_head_data_hash, - output_head_data_hash: candidate.output_head_data_hash, - relay_parent: candidate.relay_parent(), - candidate: candidate.fragment.candidate_clone(), // This clone is very cheap. - state: CandidateState::Backed, - }); + // If they used to be pending availability, don't add them. This is fine + // because: + // - if they still are pending availability, they have already been added to the new + // storage. + // - if they were included, no point in keeping them. + // + // This cannot happen for the candidates in the unconnected storage. The pending + // availability candidates will always be part of the best chain. + if self.scope.get_pending_availability(&candidate.candidate_hash).is_none() { + let _ = storage.add_candidate_entry(CandidateEntry { + candidate_hash: candidate.candidate_hash, + parent_head_data_hash: candidate.parent_head_data_hash, + output_head_data_hash: candidate.output_head_data_hash, + relay_parent: candidate.relay_parent(), + candidate: candidate.fragment.candidate_clone(), // This clone is very cheap. + state: CandidateState::Backed, + }); + } } storage diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs index dfa363339565..1e963520ea1b 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs @@ -1237,12 +1237,7 @@ fn test_populate_and_check_potential() { // Simulate the fact that candidates A, B and C have been included. 
- let mut new_storage = chain.as_candidate_storage(); - // We need to remove the candidates that used to be pending availability. This is what the - // subsystem is doing. - for candidate in scope.pending_availability { - new_storage.remove_candidate(&candidate.candidate_hash); - } + let new_storage = chain.advance_scope(); let base_constraints = make_constraints(0, vec![0], HeadData(vec![0x0d])); let scope = Scope::with_ancestors( diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index 9cea6e064118..006c7fd9f3c7 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -338,19 +338,8 @@ async fn handle_active_leaves_update( // Add old candidates to the new storage only after we added the pending // availability candidates. The pending candidates have higher priority and can // conflict with the old candidates. - for candidate in prev_fragment_chain.as_candidate_storage().into_candidates() { - // If they used to be pending availability, don't add them. This is fine - // because: - // - if they still are pending availability, they have already been added to the - // new storage. - // - if they were included, no point in keeping them. 
- if prev_fragment_chain - .scope() - .get_pending_availability(&candidate.hash()) - .is_none() - { - let _ = new_storage.add_candidate_entry(candidate); - } + for candidate in prev_fragment_chain.advance_scope().into_candidates() { + let _ = new_storage.add_candidate_entry(candidate); } } From 021515ebc3169dc769c57d22e0b1878a89274ff9 Mon Sep 17 00:00:00 2001 From: alindima Date: Wed, 7 Aug 2024 16:46:34 +0300 Subject: [PATCH 46/56] optimise candidate_backed --- .../src/fragment_chain/mod.rs | 151 +++++++++++++----- .../src/fragment_chain/tests.rs | 23 +-- .../core/prospective-parachains/src/lib.rs | 19 +-- 3 files changed, 135 insertions(+), 58 deletions(-) diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs index 4ec8551a7d73..ee716ffe4240 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs @@ -119,7 +119,7 @@ mod tests; use std::{ - cmp::Ordering, + cmp::{min, Ordering}, collections::{ hash_map::{Entry, HashMap}, BTreeMap, HashSet, VecDeque, @@ -598,6 +598,39 @@ impl BackedChain { self.chain.push(candidate); } + fn clear(&mut self) -> Vec { + self.by_parent_head.clear(); + self.by_output_head.clear(); + self.candidates.clear(); + + std::mem::take(&mut self.chain) + } + + fn revert_to_parent_hash<'a>( + &'a mut self, + parent_head_data_hash: &Hash, + ) -> impl Iterator + 'a { + let mut found_index = None; + for index in 0..self.chain.len() { + let node = &self.chain[0]; + + if found_index.is_some() { + self.by_parent_head.remove(&node.parent_head_data_hash); + self.by_output_head.remove(&node.output_head_data_hash); + self.candidates.remove(&node.candidate_hash); + } else if &node.output_head_data_hash == parent_head_data_hash { + found_index = Some(index); + } + } + + if let Some(index) = found_index { + self.chain.drain(min(index + 1, self.chain.len())..) 
+ } else { + // Don't remove anything, but use drain to satisfy the compiler. + self.chain.drain(0..0) + } + } + fn contains(&self, hash: &CandidateHash) -> bool { self.candidates.contains(hash) } @@ -642,7 +675,7 @@ impl FragmentChain { // Now that we picked the best backable chain, trim the forks generated by candidates which // are not present in the best chain. - fragment_chain.trim_uneligible_forks(&mut prev_storage); + fragment_chain.trim_uneligible_forks(&mut prev_storage, None); // Finally, keep any candidates which haven't been trimmed but still have potential. fragment_chain.populate_unconnected_potential_candidates(prev_storage); @@ -719,25 +752,6 @@ impl FragmentChain { storage } - /// Consume `self` and return the [`Scope`] and a new [`CandidateStorage`] containing all the - /// candidates from this `FragmentChain`, as well as the unconnected ones. - pub fn into_candidate_storage_and_scope(self) -> (CandidateStorage, Scope) { - let mut storage = self.unconnected; - - for candidate in self.best_chain.chain { - let _ = storage.add_candidate_entry(CandidateEntry { - candidate_hash: candidate.candidate_hash, - parent_head_data_hash: candidate.parent_head_data_hash, - output_head_data_hash: candidate.output_head_data_hash, - relay_parent: candidate.relay_parent(), - candidate: candidate.fragment.candidate_clone(), - state: CandidateState::Backed, - }); - } - - (storage, self.scope) - } - /// Try getting the full head data associated with this hash. pub fn get_head_data_by_hash(&self, head_data_hash: &Hash) -> Option { // First, see if this is the head data of the latest included candidate. @@ -1046,13 +1060,19 @@ impl FragmentChain { // Once the backable chain was populated, trim the forks generated by candidates which // are not present in the best chain. Fan this out into a full breadth-first search. 
- fn trim_uneligible_forks(&self, storage: &mut CandidateStorage) { + fn trim_uneligible_forks(&self, storage: &mut CandidateStorage, starting_point: Option) { // Start out with the candidates in the chain. They are all valid candidates. - let mut queue: VecDeque<_> = - self.best_chain.chain.iter().map(|c| (c.parent_head_data_hash, true)).collect(); - if queue.is_empty() { - queue.push_front((self.scope.base_constraints.required_parent.hash(), true)); - } + let mut queue: VecDeque<_> = if let Some(starting_point) = starting_point { + [(starting_point, true)].into_iter().collect() + } else { + if self.best_chain.chain.is_empty() { + [(self.scope.base_constraints.required_parent.hash(), true)] + .into_iter() + .collect() + } else { + self.best_chain.chain.iter().map(|c| (c.parent_head_data_hash, true)).collect() + } + }; // To make sure that cycles don't make us loop forever, keep track of the visited parent // heads. let mut visited = HashSet::new(); @@ -1126,9 +1146,9 @@ impl FragmentChain { }; let required_head_hash = child_constraints.required_parent.hash(); + // Select the few possible backed/backable children which can be added to the chain // right now. - let possible_children = storage .possible_backed_para_children(&required_head_hash) .filter_map(|candidate| { @@ -1255,15 +1275,74 @@ impl FragmentChain { } } - /// Mark a candidate as backed. The caller should make sure that the candidate is present in the - /// unconnected storage. This will trigger a recreation of the best backable chain. - pub fn candidate_backed(self, newly_backed_candidate: &CandidateHash) -> Self { - // Get the storage containing both the backable chain and the unconnected candidates. - let (mut old_storage, scope) = self.into_candidate_storage_and_scope(); + /// Mark a candidate as backed. This can trigger a recreation of the best backable chain. + pub fn candidate_backed(&mut self, newly_backed_candidate: &CandidateHash) { + // Already backed. 
+ if self.best_chain.candidates.contains(newly_backed_candidate) { + return + } + let Some(parent_head_hash) = self + .unconnected + .by_candidate_hash + .get(newly_backed_candidate) + .map(|entry| entry.parent_head_data_hash) + else { + // Candidate is not in unconnected storage. + return + }; + + // Mark the candidate hash. + self.unconnected.mark_backed(newly_backed_candidate); + + // Revert to parent_head_hash + if !self.revert_to(&parent_head_hash) { + // If nothing was reverted, there is nothing we can do for now. + return + } + + let mut prev_storage = std::mem::take(&mut self.unconnected); + + // Populate the chain. + self.populate_chain(&mut prev_storage); + + // Now that we picked the best backable chain, trim the forks generated by candidates + // which are not present in the best chain. We can start trimming from this candidate + // onwards. + self.trim_uneligible_forks(&mut prev_storage, Some(parent_head_hash)); + + // Finally, keep any candidates which haven't been trimmed but still have potential. + self.populate_unconnected_potential_candidates(prev_storage); + } + + // Revert the best backable chain so that the last candidate will be one outputting the given + // `parent_head_hash`. If the `parent_head_hash` is exactly the required parent of the base + // constraints (builds on the latest included candidate), revert the entire chain. + // Return false if we couldn't find the parent head hash. + fn revert_to(&mut self, parent_head_hash: &Hash) -> bool { + let mut removed_items = None; + if &self.scope.base_constraints.required_parent.hash() == parent_head_hash { + removed_items = Some(self.best_chain.clear()); + } - old_storage.mark_backed(newly_backed_candidate); + if removed_items.is_none() && self.best_chain.by_output_head.contains_key(parent_head_hash) + { + removed_items = Some(self.best_chain.revert_to_parent_hash(parent_head_hash).collect()); + } - // Repopulate. 
- Self::populate(scope, old_storage) + let Some(removed_items) = removed_items else { return false }; + + // Even if it's empty, we need to return true, because we'll be able to add a new candidate + // to the chain. + for node in removed_items { + let _ = self.unconnected.add_candidate_entry(CandidateEntry { + candidate_hash: node.candidate_hash, + parent_head_data_hash: node.parent_head_data_hash, + output_head_data_hash: node.output_head_data_hash, + candidate: node.fragment.candidate_clone(), + relay_parent: node.relay_parent(), + state: CandidateState::Backed, + }); + } + true } } diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs index 1e963520ea1b..ddc95ab71b76 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs @@ -1145,7 +1145,8 @@ fn test_populate_and_check_potential() { // Simulate a best chain reorg by backing a2. { - let chain = chain.candidate_backed(&candidate_a2_hash); + let mut chain = chain.clone(); + chain.candidate_backed(&candidate_a2_hash); assert_eq!(chain.best_chain_vec(), vec![candidate_a2_hash, candidate_b2_hash]); // F is kept as it was truly unconnected. The rest will be trimmed. assert_eq!( @@ -1249,7 +1250,7 @@ fn test_populate_and_check_potential() { ) .unwrap(); - let chain = FragmentChain::populate(scope, new_storage); + let mut chain = FragmentChain::populate(scope, new_storage); assert_eq!(chain.best_chain_vec(), vec![candidate_d_hash]); assert_eq!( chain.unconnected().map(|c| c.candidate_hash).collect::>(), @@ -1257,7 +1258,7 @@ fn test_populate_and_check_potential() { ); // Mark E as backed. F will be dropped for invalid watermark. No other unconnected candidates. 
- let chain = chain.candidate_backed(&candidate_e_hash); + chain.candidate_backed(&candidate_e_hash); assert_eq!(chain.best_chain_vec(), vec![candidate_d_hash, candidate_e_hash]); assert_eq!(chain.unconnected_len(), 0); @@ -1374,30 +1375,30 @@ fn test_find_ancestor_path_and_find_backable_chain() { // Do tests with only a couple of candidates being backed. { - let chain = chain.clone(); - let chain = chain.candidate_backed(&&candidates[5]); + let mut chain = chain.clone(); + chain.candidate_backed(&&candidates[5]); for count in 0..10 { assert_eq!(chain.find_backable_chain(Ancestors::new(), count).len(), 0); } - let chain = chain.candidate_backed(&&candidates[3]); - let chain = chain.candidate_backed(&&candidates[4]); + chain.candidate_backed(&&candidates[3]); + chain.candidate_backed(&&candidates[4]); for count in 0..10 { assert_eq!(chain.find_backable_chain(Ancestors::new(), count).len(), 0); } - let chain = chain.candidate_backed(&&candidates[1]); + chain.candidate_backed(&&candidates[1]); for count in 0..10 { assert_eq!(chain.find_backable_chain(Ancestors::new(), count).len(), 0); } - let chain = chain.candidate_backed(&&candidates[0]); + chain.candidate_backed(&&candidates[0]); assert_eq!(chain.find_backable_chain(Ancestors::new(), 1), hashes(0..1)); for count in 2..10 { assert_eq!(chain.find_backable_chain(Ancestors::new(), count), hashes(0..2)); } // Now back the missing piece. 
- let chain = chain.candidate_backed(&&candidates[2]); + chain.candidate_backed(&&candidates[2]); assert_eq!(chain.best_chain_len(), 6); for count in 0..10 { assert_eq!( @@ -1414,7 +1415,7 @@ fn test_find_ancestor_path_and_find_backable_chain() { let mut candidates_shuffled = candidates.clone(); candidates_shuffled.shuffle(&mut thread_rng()); for candidate in candidates.iter() { - chain = chain.candidate_backed(candidate); + chain.candidate_backed(candidate); storage.mark_backed(candidate); } diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index 006c7fd9f3c7..3cb58f11c4d1 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -28,7 +28,7 @@ #![deny(unused_crate_dependencies)] -use std::collections::{hash_map::Entry, HashMap, HashSet}; +use std::collections::{HashMap, HashSet}; use fragment_chain::CandidateStorage; use futures::{channel::oneshot, prelude::*}; @@ -590,9 +590,9 @@ async fn handle_candidate_backed( for leaf in view.active_leaves.iter() { let Some(leaf_data) = view.per_relay_parent.get_mut(leaf) else { continue }; - if let Entry::Occupied(chain) = leaf_data.fragment_chains.entry(para) { + if let Some(chain) = leaf_data.fragment_chains.get_mut(¶) { found_para = true; - if chain.get().is_candidate_backed(&candidate_hash) { + if chain.is_candidate_backed(&candidate_hash) { gum::debug!( target: LOG_TARGET, para_id = ?para, @@ -600,17 +600,17 @@ async fn handle_candidate_backed( "Received redundant instruction to mark as backed an already backed candidate", ); found_candidate = true; - } else if chain.get().contains_unconnected_candidate(&candidate_hash) { + } else if chain.contains_unconnected_candidate(&candidate_hash) { found_candidate = true; - // Now that a candidate was backed, recreate the fragment chain. - let new_chain = chain.remove().candidate_backed(&candidate_hash); + // Mark the candidate as backed. 
This can recreate the fragment chain. + chain.candidate_backed(&candidate_hash); gum::trace!( target: LOG_TARGET, relay_parent = ?leaf, para_id = ?para, "Candidate backed. Candidate chain for para: {:?}", - new_chain.best_chain_vec() + chain.best_chain_vec() ); gum::trace!( @@ -618,11 +618,8 @@ async fn handle_candidate_backed( relay_parent = ?leaf, para_id = ?para, "Potential candidate storage for para: {:?}", - new_chain.unconnected().map(|candidate| candidate.hash()).collect::>() + chain.unconnected().map(|candidate| candidate.hash()).collect::>() ); - - // We removed the old chain, add the new one. - leaf_data.fragment_chains.insert(para, new_chain); } } } From a9c57991f0272a45e34c9900d8e4d0bf376bc076 Mon Sep 17 00:00:00 2001 From: alindima Date: Thu, 8 Aug 2024 16:24:07 +0300 Subject: [PATCH 47/56] move public items to the top of the impl blocks --- .../src/fragment_chain/mod.rs | 168 +++++++++--------- 1 file changed, 85 insertions(+), 83 deletions(-) diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs index ee716ffe4240..adaea4ea7bc8 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs @@ -235,6 +235,11 @@ impl CandidateStorage { Ok(()) } + /// Consume self into an iterator over the stored candidates, in arbitrary order. + pub fn into_candidates(self) -> impl Iterator { + self.by_candidate_hash.into_values() + } + /// Remove a candidate from the store. fn remove_candidate(&mut self, candidate_hash: &CandidateHash) { if let Some(entry) = self.by_candidate_hash.remove(candidate_hash) { @@ -274,11 +279,6 @@ impl CandidateStorage { self.by_candidate_hash.values() } - /// Consume self into an iterator over the stored candidates, in arbitrary order. 
- pub fn into_candidates(self) -> impl Iterator { - self.by_candidate_hash.into_values() - } - /// Try getting head-data by hash. fn head_data_by_hash(&self, hash: &Hash) -> Option<&HeadData> { // First, search for candidates outputting this head data and extract the head data @@ -363,6 +363,10 @@ impl CandidateEntry { Self::new(candidate_hash, candidate, persisted_validation_data, CandidateState::Seconded) } + pub fn hash(&self) -> CandidateHash { + self.candidate_hash + } + fn new( candidate_hash: CandidateHash, candidate: CommittedCandidateReceipt, @@ -394,10 +398,6 @@ impl CandidateEntry { }), }) } - - pub fn hash(&self) -> CandidateHash { - self.candidate_hash - } } impl HypotheticalOrConcreteCandidate for CandidateEntry { @@ -722,6 +722,45 @@ impl FragmentChain { ) } + /// Mark a candidate as backed. This can trigger a recreation of the best backable chain. + pub fn candidate_backed(&mut self, newly_backed_candidate: &CandidateHash) { + // Already backed. + if self.best_chain.candidates.contains(newly_backed_candidate) { + return + } + let Some(parent_head_hash) = self + .unconnected + .by_candidate_hash + .get(newly_backed_candidate) + .map(|entry| entry.parent_head_data_hash) + else { + // Candidate is not in unconnected storage. + return + }; + + // Mark the candidate hash. + self.unconnected.mark_backed(newly_backed_candidate); + + // Revert to parent_head_hash + if !self.revert_to(&parent_head_hash) { + // If nothing was reverted, there is nothing we can do for now. + return + } + + let mut prev_storage = std::mem::take(&mut self.unconnected); + + // Populate the chain. + self.populate_chain(&mut prev_storage); + + // Now that we picked the best backable chain, trim the forks generated by candidates + // which are not present in the best chain. We can start trimming from this candidate + // onwards. 
+ self.trim_uneligible_forks(&mut prev_storage, Some(parent_head_hash)); + + // Finally, keep any candidates which haven't been trimmed but still have potential. + self.populate_unconnected_potential_candidates(prev_storage); + } + /// Return a new [`CandidateStorage`] containing all the candidates from this `FragmentChain`, /// as well as the unconnected ones. This does not contain the candidates that used to be /// pending availability. @@ -752,6 +791,41 @@ impl FragmentChain { storage } + /// Checks if this candidate could be added in the future to this chain. + /// This will return `Error::CandidateAlreadyKnown` if the candidate is already in the chain or + /// the unconnected candidate storage. + pub fn can_add_candidate_as_potential( + &self, + candidate: &impl HypotheticalOrConcreteCandidate, + ) -> Result<(), Error> { + let candidate_hash = candidate.candidate_hash(); + + if self.best_chain.contains(&candidate_hash) || self.unconnected.contains(&candidate_hash) { + return Err(Error::CandidateAlreadyKnown) + } + + self.check_potential(candidate) + } + + /// Try adding a seconded candidate, if the candidate has potential. It will never be added to + /// the chain directly in the seconded state, it will only be part of the unconnected storage. + pub fn try_adding_seconded_candidate( + &mut self, + candidate: &CandidateEntry, + ) -> Result<(), Error> { + if candidate.state == CandidateState::Backed { + return Err(Error::IntroduceBackedCandidate); + } + + self.can_add_candidate_as_potential(candidate)?; + + // This clone is cheap, as it uses an Arc for the expensive stuff. + // We can't consume the candidate because other fragment chains may use it also. + self.unconnected.add_candidate_entry(candidate.clone())?; + + Ok(()) + } + /// Try getting the full head data associated with this hash. pub fn get_head_data_by_hash(&self, head_data_hash: &Hash) -> Option { // First, see if this is the head data of the latest included candidate. 
@@ -877,41 +951,6 @@ impl FragmentChain { .unwrap_or_else(|| self.scope.earliest_relay_parent()) } - /// Checks if this candidate could be added in the future to this chain. - /// This will return `Error::CandidateAlreadyKnown` if the candidate is already in the chain or - /// the unconnected candidate storage. - pub fn can_add_candidate_as_potential( - &self, - candidate: &impl HypotheticalOrConcreteCandidate, - ) -> Result<(), Error> { - let candidate_hash = candidate.candidate_hash(); - - if self.best_chain.contains(&candidate_hash) || self.unconnected.contains(&candidate_hash) { - return Err(Error::CandidateAlreadyKnown) - } - - self.check_potential(candidate) - } - - /// Try adding a seconded candidate, if the candidate has potential. It will never be added to - /// the chain directly in the seconded state, it will only be part of the unconnected storage. - pub fn try_adding_seconded_candidate( - &mut self, - candidate: &CandidateEntry, - ) -> Result<(), Error> { - if candidate.state == CandidateState::Backed { - return Err(Error::IntroduceBackedCandidate); - } - - self.can_add_candidate_as_potential(candidate)?; - - // This clone is cheap, as it uses an Arc for the expensive stuff. - // We can't consume the candidate because other fragment chains may use it also. - self.unconnected.add_candidate_entry(candidate.clone())?; - - Ok(()) - } - // Populate the unconnected potential candidate storage starting from a previous storage. fn populate_unconnected_potential_candidates(&mut self, old_storage: CandidateStorage) { for candidate in old_storage.into_candidates() { @@ -1060,6 +1099,8 @@ impl FragmentChain { // Once the backable chain was populated, trim the forks generated by candidates which // are not present in the best chain. Fan this out into a full breadth-first search. + // If `starting_point` is `Some()`, start the search from the candidates having this parent head + // hash. 
fn trim_uneligible_forks(&self, storage: &mut CandidateStorage, starting_point: Option) { // Start out with the candidates in the chain. They are all valid candidates. let mut queue: VecDeque<_> = if let Some(starting_point) = starting_point { @@ -1275,45 +1316,6 @@ impl FragmentChain { } } - /// Mark a candidate as backed. This can trigger a recreation of the best backable chain. - pub fn candidate_backed(&mut self, newly_backed_candidate: &CandidateHash) { - // Already backed. - if self.best_chain.candidates.contains(newly_backed_candidate) { - return - } - let Some(parent_head_hash) = self - .unconnected - .by_candidate_hash - .get(newly_backed_candidate) - .map(|entry| entry.parent_head_data_hash) - else { - // Candidate is not in unconnected storage. - return - }; - - // Mark the candidate hash. - self.unconnected.mark_backed(newly_backed_candidate); - - // Revert to parent_head_hash - if !self.revert_to(&parent_head_hash) { - // If nothing was reverted, there is nothing we can do for now. - return - } - - let mut prev_storage = std::mem::take(&mut self.unconnected); - - // Populate the chain. - self.populate_chain(&mut prev_storage); - - // Now that we picked the best backable chain, trim the forks generated by candidates - // which are not present in the best chain. We can start trimming from this candidate - // onwards. - self.trim_uneligible_forks(&mut prev_storage, Some(parent_head_hash)); - - // Finally, keep any candidates which haven't been trimmed but still have potential. - self.populate_unconnected_potential_candidates(prev_storage); - } - // Revert the best backable chain so that the last candidate will be one outputting the given // `parent_head_hash`. If the `parent_head_hash` is exactly the required parent of the base // constraints (builds on the latest included candidate), revert the entire chain. 
From 243466fdaa90050586963f3c0538c2076e42d1db Mon Sep 17 00:00:00 2001 From: alindima Date: Thu, 8 Aug 2024 16:51:57 +0300 Subject: [PATCH 48/56] remove comment --- .../network/collator-protocol/src/validator_side/collation.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/polkadot/node/network/collator-protocol/src/validator_side/collation.rs b/polkadot/node/network/collator-protocol/src/validator_side/collation.rs index 2bb8e08e3c93..96ffe9f13db3 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/collation.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/collation.rs @@ -148,9 +148,6 @@ pub fn fetched_collation_sanity_check( .prospective_candidate .map_or(false, |pc| pc.candidate_hash() != fetched.hash()) { - // Note: it's important that we check for this and punish a collator that advertises a false - // candidate hash, because it can be misused to prioritise a specific collation, according - // to the fork choice rule in Prospective Parachains. 
Err(SecondingError::CandidateHashMismatch) } else if maybe_parent_head_and_hash.map_or(false, |(head, hash)| head.hash() != hash) { Err(SecondingError::ParentHeadDataMismatch) From 78cf31af90d7ed9662ffacc2f555725887a25e8e Mon Sep 17 00:00:00 2001 From: alindima Date: Thu, 8 Aug 2024 17:19:53 +0300 Subject: [PATCH 49/56] refactor fragment chain constructor and acitve leave update handling --- .../src/fragment_chain/mod.rs | 85 ++++++++-------- .../src/fragment_chain/tests.rs | 99 +++++++++++-------- .../core/prospective-parachains/src/lib.rs | 28 +++--- 3 files changed, 116 insertions(+), 96 deletions(-) diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs index adaea4ea7bc8..00f2e179ec69 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs @@ -659,28 +659,63 @@ pub(crate) struct FragmentChain { } impl FragmentChain { - /// Create a new [`FragmentChain`] with given scope and populated from the given storage. - /// The `prev_storage` should contain the candidates of the `FragmentChain` at the previous - /// relay parent, as well as the candidates pending availability at this relay parent. - pub fn populate(scope: Scope, mut prev_storage: CandidateStorage) -> Self { - // Initialize as empty + /// Create a new [`FragmentChain`] with the given scope and populate it with the candidates + /// pending availability. + pub fn init(scope: Scope, mut candidates_pending_availability: CandidateStorage) -> Self { let mut fragment_chain = Self { scope, best_chain: BackedChain::default(), unconnected: CandidateStorage::default(), }; + // We only need to populate the best backable chain. Candidates pending availability must + // form a chain with the latest included head. 
+ fragment_chain.populate_chain(&mut candidates_pending_availability); + + // TODO: return error if not all candidates were introduced successfully. + + fragment_chain + } + + /// Populate the [`FragmentChain`] given the new candidates pending availability and the + /// optional previous fragment chain (of the previous relay parent). + pub fn populate_from_previous(&mut self, prev_fragment_chain: &FragmentChain) { + let mut prev_storage = prev_fragment_chain.unconnected.clone(); + + for candidate in prev_fragment_chain.best_chain.chain.iter() { + // If they used to be pending availability, don't add them. This is fine + // because: + // - if they still are pending availability, they have already been added to the new + // storage. + // - if they were included, no point in keeping them. + // + // This cannot happen for the candidates in the unconnected storage. The pending + // availability candidates will always be part of the best chain. + if prev_fragment_chain + .scope + .get_pending_availability(&candidate.candidate_hash) + .is_none() + { + let _ = prev_storage.add_candidate_entry(CandidateEntry { + candidate_hash: candidate.candidate_hash, + parent_head_data_hash: candidate.parent_head_data_hash, + output_head_data_hash: candidate.output_head_data_hash, + relay_parent: candidate.relay_parent(), + candidate: candidate.fragment.candidate_clone(), // This clone is very cheap. + state: CandidateState::Backed, + }); + } + } + // First populate the best backable chain. - fragment_chain.populate_chain(&mut prev_storage); + self.populate_chain(&mut prev_storage); // Now that we picked the best backable chain, trim the forks generated by candidates which // are not present in the best chain. - fragment_chain.trim_uneligible_forks(&mut prev_storage, None); + self.trim_uneligible_forks(&mut prev_storage, None); // Finally, keep any candidates which haven't been trimmed but still have potential. 
- fragment_chain.populate_unconnected_potential_candidates(prev_storage); - - fragment_chain + self.populate_unconnected_potential_candidates(prev_storage); } /// Get the scope of the [`FragmentChain`]. @@ -761,36 +796,6 @@ impl FragmentChain { self.populate_unconnected_potential_candidates(prev_storage); } - /// Return a new [`CandidateStorage`] containing all the candidates from this `FragmentChain`, - /// as well as the unconnected ones. This does not contain the candidates that used to be - /// pending availability. - pub fn advance_scope(&self) -> CandidateStorage { - let mut storage = self.unconnected.clone(); - - for candidate in self.best_chain.chain.iter() { - // If they used to be pending availability, don't add them. This is fine - // because: - // - if they still are pending availability, they have already been added to the new - // storage. - // - if they were included, no point in keeping them. - // - // This cannot happen for the candidates in the unconnected storage. The pending - // availability candidates will always be part of the best chain. - if self.scope.get_pending_availability(&candidate.candidate_hash).is_none() { - let _ = storage.add_candidate_entry(CandidateEntry { - candidate_hash: candidate.candidate_hash, - parent_head_data_hash: candidate.parent_head_data_hash, - output_head_data_hash: candidate.output_head_data_hash, - relay_parent: candidate.relay_parent(), - candidate: candidate.fragment.candidate_clone(), // This clone is very cheap. - state: CandidateState::Backed, - }); - } - } - - storage - } - /// Checks if this candidate could be added in the future to this chain. /// This will return `Error::CandidateAlreadyKnown` if the candidate is already in the chain or /// the unconnected candidate storage. 
diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs index ddc95ab71b76..5a64819dc4e3 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs @@ -87,6 +87,18 @@ fn make_committed_candidate( (persisted_validation_data, candidate) } +fn populate_chain_from_previous_storage( + scope: &Scope, + storage: &CandidateStorage, +) -> FragmentChain { + let mut chain = FragmentChain::init(scope.clone(), CandidateStorage::default()); + let mut prev_chain = chain.clone(); + prev_chain.unconnected = storage.clone(); + + chain.populate_from_previous(&prev_chain); + chain +} + #[test] fn scope_rejects_ancestors_that_skip_blocks() { let relay_parent = RelayChainBlockInfo { @@ -378,9 +390,8 @@ fn candidate_storage_methods() { } #[test] -fn populate_empty() { +fn init_and_populate_from_empty() { // Empty chain and empty storage. 
- let storage = CandidateStorage::default(); let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); let scope = Scope::with_ancestors( @@ -395,7 +406,12 @@ fn populate_empty() { vec![], ) .unwrap(); - let chain = FragmentChain::populate(scope, storage); + let chain = FragmentChain::init(scope.clone(), CandidateStorage::default()); + assert_eq!(chain.best_chain_len(), 0); + assert_eq!(chain.unconnected_len(), 0); + + let mut new_chain = FragmentChain::init(scope, CandidateStorage::default()); + new_chain.populate_from_previous(&chain); assert_eq!(chain.best_chain_len(), 0); assert_eq!(chain.unconnected_len(), 0); } @@ -482,7 +498,7 @@ fn test_populate_and_check_potential() { ancestors.clone(), ) .unwrap(); - let chain = FragmentChain::populate(scope, storage.clone()); + let chain = populate_chain_from_previous_storage(&scope, &storage); assert!(chain.best_chain_vec().is_empty()); @@ -520,12 +536,12 @@ fn test_populate_and_check_potential() { ancestors.clone(), ) .unwrap(); - let chain = FragmentChain::populate(scope.clone(), CandidateStorage::default()); + let chain = FragmentChain::init(scope.clone(), CandidateStorage::default()); assert!(chain.can_add_candidate_as_potential(&candidate_a_entry).is_ok()); assert!(chain.can_add_candidate_as_potential(&candidate_b_entry).is_ok()); assert!(chain.can_add_candidate_as_potential(&candidate_c_entry).is_ok()); - let chain = FragmentChain::populate(scope, storage.clone()); + let chain = populate_chain_from_previous_storage(&scope, &storage); assert_eq!(chain.best_chain_vec(), vec![candidate_a_hash]); assert_eq!( chain.unconnected().map(|c| c.candidate_hash).collect::>(), @@ -541,12 +557,12 @@ fn test_populate_and_check_potential() { ancestors.clone(), ) .unwrap(); - let chain = FragmentChain::populate(scope.clone(), CandidateStorage::default()); + let chain = FragmentChain::init(scope.clone(), CandidateStorage::default()); assert!(chain.can_add_candidate_as_potential(&candidate_a_entry).is_ok()); 
assert!(chain.can_add_candidate_as_potential(&candidate_b_entry).is_ok()); assert!(chain.can_add_candidate_as_potential(&candidate_c_entry).is_ok()); - let chain = FragmentChain::populate(scope, storage.clone()); + let chain = populate_chain_from_previous_storage(&scope, &storage); assert_eq!(chain.best_chain_vec(), vec![candidate_a_hash, candidate_b_hash]); assert_eq!( chain.unconnected().map(|c| c.candidate_hash).collect::>(), @@ -563,12 +579,12 @@ fn test_populate_and_check_potential() { ancestors.clone(), ) .unwrap(); - let chain = FragmentChain::populate(scope.clone(), CandidateStorage::default()); + let chain = FragmentChain::init(scope.clone(), CandidateStorage::default()); assert!(chain.can_add_candidate_as_potential(&candidate_a_entry).is_ok()); assert!(chain.can_add_candidate_as_potential(&candidate_b_entry).is_ok()); assert!(chain.can_add_candidate_as_potential(&candidate_c_entry).is_ok()); - let chain = FragmentChain::populate(scope, storage.clone()); + let chain = populate_chain_from_previous_storage(&scope, &storage); assert_eq!( chain.best_chain_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash] @@ -590,7 +606,7 @@ fn test_populate_and_check_potential() { ancestors_without_x, ) .unwrap(); - let chain = FragmentChain::populate(scope, storage.clone()); + let chain = populate_chain_from_previous_storage(&scope, &storage); assert!(chain.best_chain_vec().is_empty()); assert_eq!(chain.unconnected_len(), 0); @@ -613,7 +629,8 @@ fn test_populate_and_check_potential() { vec![], ) .unwrap(); - let chain = FragmentChain::populate(scope, storage.clone()); + let chain = populate_chain_from_previous_storage(&scope, &storage); + assert!(chain.best_chain_vec().is_empty()); assert_eq!(chain.unconnected_len(), 0); @@ -659,7 +676,7 @@ fn test_populate_and_check_potential() { ) .unwrap(); - let chain = FragmentChain::populate(scope.clone(), modified_storage); + let chain = populate_chain_from_previous_storage(&scope, &modified_storage); 
assert_eq!(chain.best_chain_vec(), vec![candidate_a_hash, candidate_b_hash]); assert_eq!(chain.unconnected_len(), 0); @@ -668,7 +685,7 @@ fn test_populate_and_check_potential() { Err(Error::Cycle) ); // However, if taken independently, C still has potential, since we don't know A and B. - let chain = FragmentChain::populate(scope.clone(), CandidateStorage::default()); + let chain = FragmentChain::init(scope.clone(), CandidateStorage::default()); assert!(chain.can_add_candidate_as_potential(&wrong_candidate_c_entry).is_ok()); } @@ -701,7 +718,8 @@ fn test_populate_and_check_potential() { ) .unwrap(); - let chain = FragmentChain::populate(scope, modified_storage); + let chain = populate_chain_from_previous_storage(&scope, &modified_storage); + assert_eq!(chain.best_chain_vec(), vec![candidate_a_hash, candidate_b_hash]); assert_eq!(chain.unconnected_len(), 0); assert_matches!( @@ -741,10 +759,11 @@ fn test_populate_and_check_potential() { ancestors.clone(), ) .unwrap(); - let chain = FragmentChain::populate(scope.clone(), CandidateStorage::default()); + let chain = FragmentChain::init(scope.clone(), CandidateStorage::default()); assert!(chain.can_add_candidate_as_potential(&unconnected_candidate_c_entry).is_ok()); - let chain = FragmentChain::populate(scope, modified_storage.clone()); + let chain = populate_chain_from_previous_storage(&scope, &modified_storage); + assert_eq!(chain.best_chain_vec(), vec![candidate_a_hash, candidate_b_hash]); assert_eq!( chain.unconnected().map(|c| c.candidate_hash).collect::>(), @@ -790,7 +809,7 @@ fn test_populate_and_check_potential() { ) .unwrap(); - let chain = FragmentChain::populate(scope, modified_storage.clone()); + let chain = populate_chain_from_previous_storage(&scope, &modified_storage); assert_eq!(chain.best_chain_vec(), vec![modified_candidate_a_hash, candidate_b_hash]); assert_eq!(chain.unconnected_len(), 0); assert_matches!( @@ -836,7 +855,7 @@ fn test_populate_and_check_potential() { ) .unwrap(); - let chain = 
FragmentChain::populate(scope, modified_storage.clone()); + let chain = populate_chain_from_previous_storage(&scope, &modified_storage); assert_eq!(chain.best_chain_vec(), vec![modified_candidate_a_hash, candidate_b_hash]); assert_eq!(chain.unconnected_len(), 0); assert_matches!( @@ -885,7 +904,7 @@ fn test_populate_and_check_potential() { ancestors.clone(), ) .unwrap(); - let chain = FragmentChain::populate(scope, storage.clone()); + let chain = populate_chain_from_previous_storage(&scope, &storage); assert_eq!( chain.best_chain_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash] @@ -907,7 +926,8 @@ fn test_populate_and_check_potential() { ancestors_without_x, ) .unwrap(); - let chain = FragmentChain::populate(scope, storage.clone()); + let chain = populate_chain_from_previous_storage(&scope, &storage); + assert_eq!( chain.best_chain_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash] @@ -941,7 +961,7 @@ fn test_populate_and_check_potential() { vec![], ) .unwrap(); - let chain = FragmentChain::populate(scope, storage.clone()); + let chain = populate_chain_from_previous_storage(&scope, &storage); assert!(chain.best_chain_vec().is_empty()); assert_eq!(chain.unconnected_len(), 0); } @@ -981,7 +1001,7 @@ fn test_populate_and_check_potential() { let candidate_d_hash = candidate_d.hash(); let candidate_d_entry = CandidateEntry::new(candidate_d_hash, candidate_d, pvd_d, CandidateState::Backed).unwrap(); - assert!(FragmentChain::populate(scope.clone(), storage.clone()) + assert!(populate_chain_from_previous_storage(&scope, &storage) .can_add_candidate_as_potential(&candidate_d_entry) .is_ok()); storage.add_candidate_entry(candidate_d_entry).unwrap(); @@ -999,7 +1019,7 @@ fn test_populate_and_check_potential() { let candidate_f_entry = CandidateEntry::new(candidate_f_hash, candidate_f, pvd_f, CandidateState::Seconded) .unwrap(); - assert!(FragmentChain::populate(scope.clone(), storage.clone()) + 
assert!(populate_chain_from_previous_storage(&scope, &storage) .can_add_candidate_as_potential(&candidate_f_entry) .is_ok()); storage.add_candidate_entry(candidate_f_entry.clone()).unwrap(); @@ -1021,7 +1041,7 @@ fn test_populate_and_check_potential() { assert_eq!(fork_selection_rule(&candidate_a_hash, &candidate_a1_hash), Ordering::Less); assert_matches!( - FragmentChain::populate(scope.clone(), storage.clone()) + populate_chain_from_previous_storage(&scope, &storage) .can_add_candidate_as_potential(&candidate_a1_entry), Err(Error::ForkChoiceRule(other)) if candidate_a_hash == other ); @@ -1041,7 +1061,7 @@ fn test_populate_and_check_potential() { let candidate_b1_entry = CandidateEntry::new(candidate_b1_hash, candidate_b1, pvd_b1, CandidateState::Seconded) .unwrap(); - assert!(FragmentChain::populate(scope.clone(), storage.clone()) + assert!(populate_chain_from_previous_storage(&scope, &storage) .can_add_candidate_as_potential(&candidate_b1_entry) .is_ok()); @@ -1060,7 +1080,7 @@ fn test_populate_and_check_potential() { let candidate_c1_entry = CandidateEntry::new(candidate_c1_hash, candidate_c1, pvd_c1, CandidateState::Backed) .unwrap(); - assert!(FragmentChain::populate(scope.clone(), storage.clone()) + assert!(populate_chain_from_previous_storage(&scope, &storage) .can_add_candidate_as_potential(&candidate_c1_entry) .is_ok()); @@ -1079,7 +1099,7 @@ fn test_populate_and_check_potential() { let candidate_c2_entry = CandidateEntry::new(candidate_c2_hash, candidate_c2, pvd_c2, CandidateState::Seconded) .unwrap(); - assert!(FragmentChain::populate(scope.clone(), storage.clone()) + assert!(populate_chain_from_previous_storage(&scope, &storage) .can_add_candidate_as_potential(&candidate_c2_entry) .is_ok()); storage.add_candidate_entry(candidate_c2_entry).unwrap(); @@ -1100,7 +1120,7 @@ fn test_populate_and_check_potential() { // Candidate A2 is created so that its hash is larger than the candidate A hash. 
assert_eq!(fork_selection_rule(&candidate_a2_hash, &candidate_a_hash), Ordering::Less); - assert!(FragmentChain::populate(scope.clone(), storage.clone()) + assert!(populate_chain_from_previous_storage(&scope, &storage) .can_add_candidate_as_potential(&candidate_a2_entry) .is_ok()); @@ -1119,12 +1139,12 @@ fn test_populate_and_check_potential() { let candidate_b2_entry = CandidateEntry::new(candidate_b2_hash, candidate_b2, pvd_b2, CandidateState::Backed) .unwrap(); - assert!(FragmentChain::populate(scope.clone(), storage.clone()) + assert!(populate_chain_from_previous_storage(&scope, &storage) .can_add_candidate_as_potential(&candidate_b2_entry) .is_ok()); storage.add_candidate_entry(candidate_b2_entry).unwrap(); - let chain = FragmentChain::populate(scope.clone(), storage.clone()); + let chain = populate_chain_from_previous_storage(&scope, &storage); assert_eq!(chain.best_chain_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); assert_eq!( chain.unconnected().map(|c| c.candidate_hash).collect::>(), @@ -1185,7 +1205,7 @@ fn test_populate_and_check_potential() { ) .unwrap(); - let chain = FragmentChain::populate(scope, storage.clone()); + let chain = populate_chain_from_previous_storage(&scope, &storage); assert_eq!(chain.best_chain_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); assert_eq!( chain.unconnected().map(|c| c.candidate_hash).collect::>(), @@ -1224,7 +1244,7 @@ fn test_populate_and_check_potential() { .unwrap(); // A2 and B2 will now be trimmed - let chain = FragmentChain::populate(scope.clone(), storage.clone()); + let chain = populate_chain_from_previous_storage(&scope, &storage); assert_eq!(chain.best_chain_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); assert_eq!( chain.unconnected().map(|c| c.candidate_hash).collect::>(), @@ -1238,8 +1258,6 @@ fn test_populate_and_check_potential() { // Simulate the fact that candidates A, B and C have been included. 
- let new_storage = chain.advance_scope(); - let base_constraints = make_constraints(0, vec![0], HeadData(vec![0x0d])); let scope = Scope::with_ancestors( relay_parent_z_info.clone(), @@ -1250,7 +1268,9 @@ fn test_populate_and_check_potential() { ) .unwrap(); - let mut chain = FragmentChain::populate(scope, new_storage); + let prev_chain = chain; + let mut chain = FragmentChain::init(scope, CandidateStorage::default()); + chain.populate_from_previous(&prev_chain); assert_eq!(chain.best_chain_vec(), vec![candidate_d_hash]); assert_eq!( chain.unconnected().map(|c| c.candidate_hash).collect::>(), @@ -1275,7 +1295,6 @@ fn test_find_ancestor_path_and_find_backable_chain_empty_best_chain() { let max_depth = 10; // Empty chain - let storage = CandidateStorage::default(); let base_constraints = make_constraints(0, vec![0], required_parent.clone()); let relay_parent_info = @@ -1284,7 +1303,7 @@ fn test_find_ancestor_path_and_find_backable_chain_empty_best_chain() { let scope = Scope::with_ancestors(relay_parent_info, base_constraints, vec![], max_depth, vec![]) .unwrap(); - let chain = FragmentChain::populate(scope, storage); + let chain = FragmentChain::init(scope, CandidateStorage::default()); assert_eq!(chain.best_chain_len(), 0); assert_eq!(chain.find_ancestor_path(Ancestors::new()), 0); @@ -1361,7 +1380,7 @@ fn test_find_ancestor_path_and_find_backable_chain() { vec![], ) .unwrap(); - let mut chain = FragmentChain::populate(scope, storage.clone()); + let mut chain = populate_chain_from_previous_storage(&scope, &storage); // For now, candidates are only seconded, not backed. So the best chain is empty and no // candidate will be returned. @@ -1489,7 +1508,7 @@ fn test_find_ancestor_path_and_find_backable_chain() { vec![], ) .unwrap(); - let chain = FragmentChain::populate(scope, storage); + let chain = populate_chain_from_previous_storage(&scope, &storage); let ancestors: Ancestors = [candidates[0], candidates[1]].into_iter().collect(); assert_eq!( // Stop at 4. 
diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index 3cb58f11c4d1..34d2651d2ff2 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -278,11 +278,11 @@ async fn handle_active_leaves_update( .await?; let mut compact_pending = Vec::with_capacity(pending_availability.len()); - let mut new_storage = CandidateStorage::default(); + let mut pending_availability_storage = CandidateStorage::default(); for c in pending_availability { let candidate_hash = c.compact.candidate_hash; - let res = new_storage.add_pending_availability_candidate( + let res = pending_availability_storage.add_pending_availability_candidate( candidate_hash, c.candidate, c.persisted_validation_data, @@ -331,18 +331,6 @@ async fn handle_active_leaves_update( }, }; - // Get the candidate storage of the parent leaf, if present. - let prev_fragment_chain = prev_fragment_chains.and_then(|chains| chains.get(¶)); - - if let Some(prev_fragment_chain) = prev_fragment_chain { - // Add old candidates to the new storage only after we added the pending - // availability candidates. The pending candidates have higher priority and can - // conflict with the old candidates. - for candidate in prev_fragment_chain.advance_scope().into_candidates() { - let _ = new_storage.add_candidate_entry(candidate); - } - } - gum::trace!( target: LOG_TARGET, relay_parent = ?hash, @@ -352,8 +340,16 @@ async fn handle_active_leaves_update( "Creating fragment chain" ); - // Finally, populate the fragment chain. - let chain = FragmentChain::populate(scope, new_storage); + // Init the fragment chain with the pending availability candidates. + let mut chain = FragmentChain::init(scope, pending_availability_storage); + + // If we know the previous fragment chain, use that for further populating the fragment + // chain. 
+ if let Some(prev_fragment_chain) = + prev_fragment_chains.and_then(|chains| chains.get(¶)) + { + chain.populate_from_previous(prev_fragment_chain); + } gum::trace!( target: LOG_TARGET, From 0243915d005265626f772f0d29d461add474731a Mon Sep 17 00:00:00 2001 From: alindima Date: Thu, 8 Aug 2024 17:35:10 +0300 Subject: [PATCH 50/56] restrict the visibility of some items --- .../src/fragment_chain/mod.rs | 25 ++++++++----------- 1 file changed, 11 insertions(+), 14 deletions(-) diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs index 00f2e179ec69..13b04de33582 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs @@ -216,7 +216,7 @@ impl CandidateStorage { } /// Introduce a new candidate entry. - pub fn add_candidate_entry(&mut self, candidate: CandidateEntry) -> Result<(), Error> { + fn add_candidate_entry(&mut self, candidate: CandidateEntry) -> Result<(), Error> { let candidate_hash = candidate.candidate_hash; if self.by_candidate_hash.contains_key(&candidate_hash) { return Err(Error::CandidateAlreadyKnown) @@ -235,11 +235,6 @@ impl CandidateStorage { Ok(()) } - /// Consume self into an iterator over the stored candidates, in arbitrary order. - pub fn into_candidates(self) -> impl Iterator { - self.by_candidate_hash.into_values() - } - /// Remove a candidate from the store. fn remove_candidate(&mut self, candidate_hash: &CandidateHash) { if let Some(entry) = self.by_candidate_hash.remove(candidate_hash) { @@ -542,22 +537,24 @@ impl Scope { self.ancestors_by_hash.get(hash).map(|info| info.clone()) } + /// Get the base constraints of the scope + pub fn base_constraints(&self) -> &Constraints { + &self.base_constraints + } + /// Whether the candidate in question is one pending availability in this scope. 
- pub fn get_pending_availability( + fn get_pending_availability( &self, candidate_hash: &CandidateHash, ) -> Option<&PendingAvailability> { self.pending_availability.iter().find(|c| &c.candidate_hash == candidate_hash) } - - /// Get the base constraints of the scope - pub fn base_constraints(&self) -> &Constraints { - &self.base_constraints - } } #[cfg_attr(test, derive(Clone))] -pub struct FragmentNode { +/// A node that is part of a `BackedChain`. It holds constraints based on the ancestors in the +/// chain. +struct FragmentNode { fragment: Fragment, candidate_hash: CandidateHash, cumulative_modifications: ConstraintModifications, @@ -958,7 +955,7 @@ impl FragmentChain { // Populate the unconnected potential candidate storage starting from a previous storage. fn populate_unconnected_potential_candidates(&mut self, old_storage: CandidateStorage) { - for candidate in old_storage.into_candidates() { + for candidate in old_storage.by_candidate_hash.into_values() { // Sanity check, all pending availability candidates should be already present in the // chain. 
if self.scope.get_pending_availability(&candidate.candidate_hash).is_some() { From 034706ebd045a79ee752a8aeba6576c08e5f848c Mon Sep 17 00:00:00 2001 From: alindima Date: Thu, 8 Aug 2024 17:46:46 +0300 Subject: [PATCH 51/56] dedup into a From impl --- .../src/fragment_chain/mod.rs | 36 ++++++++++--------- 1 file changed, 19 insertions(+), 17 deletions(-) diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs index 13b04de33582..8899e4d77951 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs @@ -568,6 +568,22 @@ impl FragmentNode { } } +impl From<&FragmentNode> for CandidateEntry { + fn from(node: &FragmentNode) -> Self { + // We don't need to perform the checks done in `CandidateEntry::new()`, since a + // `FragmentNode` always comes from a `CandidateEntry` + Self { + candidate_hash: node.candidate_hash, + parent_head_data_hash: node.parent_head_data_hash, + output_head_data_hash: node.output_head_data_hash, + candidate: node.fragment.candidate_clone(), + relay_parent: node.relay_parent(), + // A fragment node is always backed. + state: CandidateState::Backed, + } + } +} + /// A candidate chain of backed/backable candidates. /// Includes the candidates pending availability and candidates which may be backed on-chain. #[derive(Default)] @@ -693,14 +709,7 @@ impl FragmentChain { .get_pending_availability(&candidate.candidate_hash) .is_none() { - let _ = prev_storage.add_candidate_entry(CandidateEntry { - candidate_hash: candidate.candidate_hash, - parent_head_data_hash: candidate.parent_head_data_hash, - output_head_data_hash: candidate.output_head_data_hash, - relay_parent: candidate.relay_parent(), - candidate: candidate.fragment.candidate_clone(), // This clone is very cheap. 
- state: CandidateState::Backed, - }); + let _ = prev_storage.add_candidate_entry(candidate.into()); } } @@ -1337,15 +1346,8 @@ impl FragmentChain { // Even if it's empty, we need to return true, because we'll be able to add a new candidate // to the chain. - for node in removed_items { - let _ = self.unconnected.add_candidate_entry(CandidateEntry { - candidate_hash: node.candidate_hash, - parent_head_data_hash: node.parent_head_data_hash, - output_head_data_hash: node.output_head_data_hash, - candidate: node.fragment.candidate_clone(), - relay_parent: node.relay_parent(), - state: CandidateState::Backed, - }); + for node in &removed_items { + let _ = self.unconnected.add_candidate_entry(node.into()); } true } From 09eb5eea495906f9af2dd69acfb1df81487a0a18 Mon Sep 17 00:00:00 2001 From: alindima Date: Thu, 8 Aug 2024 17:49:24 +0300 Subject: [PATCH 52/56] update prdoc --- prdoc/pr_4937.prdoc | 2 -- 1 file changed, 2 deletions(-) diff --git a/prdoc/pr_4937.prdoc b/prdoc/pr_4937.prdoc index 916340e42545..37b7bc3dda59 100644 --- a/prdoc/pr_4937.prdoc +++ b/prdoc/pr_4937.prdoc @@ -17,7 +17,5 @@ crates: bump: minor - name: polkadot-node-core-provisioner bump: none - - name: polkadot-collator-protocol - bump: none - name: polkadot-statement-distribution bump: none From 64546fea453ef68b9cf4fe62fe4af49615b1be74 Mon Sep 17 00:00:00 2001 From: alindima Date: Fri, 9 Aug 2024 15:59:01 +0300 Subject: [PATCH 53/56] update some comments --- .../src/fragment_chain/mod.rs | 14 ++++++++------ .../src/fragment_chain/tests.rs | 4 ++-- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs index 8899e4d77951..dc913d06947a 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs @@ -47,11 +47,13 @@ //! 
their parent (so we may parallelize the backing process across different groups for elastic //! scaling) and so that we accept parachain forks. //! -//! We accept parachain forks only until reaching the backing quorum. After that, we assume all -//! validators pick the same fork according to the fork selection rule. If we decided to not accept -//! parachain forks, candidates could end up getting only half of the backing votes or even less -//! (for forks of larger arity). This would affect the validator rewards. Still, we don't guarantee -//! that a fork-producing parachains will be able to fully use elastic scaling. +//! We accept parachain forks only if the fork selection rule allows for it. In other words, if we +//! have a backed candidate, we begin seconding/validating a fork only if it has a lower candidate +//! hash. Once both forks are backed, we discard the one with the higher candidate hash. +//! We assume all validators pick the same fork according to the fork selection rule. If we decided +//! to not accept parachain forks, candidates could end up getting only half of the backing votes or +//! even less (for forks of larger arity). This would affect the validator rewards. Still, we don't +//! guarantee that a fork-producing parachains will be able to fully use elastic scaling. //! //! Once a candidate is backed and becomes part of the best chain, we can trim from the //! unconnected storage candidates which constitute forks on the best chain and no longer have @@ -1204,7 +1206,7 @@ impl FragmentChain { let possible_children = storage .possible_backed_para_children(&required_head_hash) .filter_map(|candidate| { - // Only select a candidates if: + // Only select a candidate if: // 1. it does not introduce a fork or a cycle. // 2. parent hash is correct. // 3. relay-parent does not move backwards. 
diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs index 5a64819dc4e3..9886d19e5224 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs @@ -1037,7 +1037,7 @@ fn test_populate_and_check_potential() { let candidate_a1_entry = CandidateEntry::new(candidate_a1_hash, candidate_a1, pvd_a1, CandidateState::Backed) .unwrap(); - // Candidate A1 is created so that its hash is larger than the candidate A hash. + // Candidate A1 is created so that its hash is greater than the candidate A hash. assert_eq!(fork_selection_rule(&candidate_a_hash, &candidate_a1_hash), Ordering::Less); assert_matches!( @@ -1117,7 +1117,7 @@ fn test_populate_and_check_potential() { let candidate_a2_entry = CandidateEntry::new(candidate_a2_hash, candidate_a2, pvd_a2, CandidateState::Seconded) .unwrap(); - // Candidate A2 is created so that its hash is larger than the candidate A hash. + // Candidate A2 is created so that its hash is greater than the candidate A hash. 
assert_eq!(fork_selection_rule(&candidate_a2_hash, &candidate_a_hash), Ordering::Less); assert!(populate_chain_from_previous_storage(&scope, &storage) From 1c31af04288ea632fcaa8cc77edce33ef73430a1 Mon Sep 17 00:00:00 2001 From: alindima Date: Fri, 9 Aug 2024 16:12:18 +0300 Subject: [PATCH 54/56] address a todo --- .../src/fragment_chain/mod.rs | 11 +++++------ .../node/core/prospective-parachains/src/lib.rs | 13 +++++++++++++ 2 files changed, 18 insertions(+), 6 deletions(-) diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs index dc913d06947a..b060897d4391 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs @@ -217,6 +217,11 @@ impl CandidateStorage { self.add_candidate_entry(entry) } + /// Return the number of stored candidates. + pub fn len(&self) -> usize { + self.by_candidate_hash.len() + } + /// Introduce a new candidate entry. fn add_candidate_entry(&mut self, candidate: CandidateEntry) -> Result<(), Error> { let candidate_hash = candidate.candidate_hash; @@ -313,10 +318,6 @@ impl CandidateStorage { }) }) } - - fn len(&self) -> usize { - self.by_candidate_hash.len() - } } /// The state of a candidate. @@ -687,8 +688,6 @@ impl FragmentChain { // form a chain with the latest included head. fragment_chain.populate_chain(&mut candidates_pending_availability); - // TODO: return error if not all candidates were introduced successfully. 
- fragment_chain } diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index 34d2651d2ff2..bea9a7fca6d6 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -340,9 +340,22 @@ async fn handle_active_leaves_update( "Creating fragment chain" ); + let number_of_pending_candidates = pending_availability_storage.len(); + // Init the fragment chain with the pending availability candidates. let mut chain = FragmentChain::init(scope, pending_availability_storage); + if chain.best_chain_len() < number_of_pending_candidates { + gum::warn!( + target: LOG_TARGET, + relay_parent = ?hash, + para_id = ?para, + "Not all pending availability candidates could be introduced. Actual vs expected count: {}, {}", + chain.best_chain_len(), + number_of_pending_candidates + ) + } + // If we know the previous fragment chain, use that for further populating the fragment // chain. 
if let Some(prev_fragment_chain) = From 142432df82f3c41d2cf817d95a30c530f2c5368a Mon Sep 17 00:00:00 2001 From: alindima Date: Fri, 9 Aug 2024 16:43:15 +0300 Subject: [PATCH 55/56] keep tracking candidates for deactivated leaves in implicit view --- .../core/prospective-parachains/src/lib.rs | 149 ++++++++++-------- .../prospective-parachains/src/metrics.rs | 2 +- .../core/prospective-parachains/src/tests.rs | 54 ++++++- 3 files changed, 133 insertions(+), 72 deletions(-) diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index bea9a7fca6d6..ecb1f3a476ec 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -524,44 +524,48 @@ async fn handle_introduce_seconded_candidate( let mut added = false; let mut para_scheduled = false; - for leaf in view.active_leaves.iter() { - let Some(leaf_data) = view.per_relay_parent.get_mut(leaf) else { continue }; + // We don't iterate only through the active leaves. We also update the deactivated parents in + // the implicit view, so that their upcoming children may see these candidates. 
+ for (relay_parent, rp_data) in view.per_relay_parent.iter_mut() { + let Some(chain) = rp_data.fragment_chains.get_mut(¶) else { continue }; + let is_active_leaf = view.active_leaves.contains(relay_parent); - if let Some(chain) = leaf_data.fragment_chains.get_mut(¶) { - para_scheduled = true; + para_scheduled = true; - match chain.try_adding_seconded_candidate(&candidate_entry) { - Ok(()) => { - gum::debug!( - target: LOG_TARGET, - para = ?para, - relay_parent = ?leaf, - "Added seconded candidate {:?}", - candidate_hash - ); - added = true; - }, - Err(FragmentChainError::CandidateAlreadyKnown) => { - gum::debug!( - target: LOG_TARGET, - para = ?para, - relay_parent = ?leaf, - "Attempting to introduce an already known candidate: {:?}", - candidate_hash - ); - added = true; - }, - Err(err) => { - gum::debug!( - target: LOG_TARGET, - para = ?para, - relay_parent = ?leaf, - ?candidate_hash, - "Cannot introduce seconded candidate: {}", - err - ) - }, - } + match chain.try_adding_seconded_candidate(&candidate_entry) { + Ok(()) => { + gum::debug!( + target: LOG_TARGET, + ?para, + ?relay_parent, + ?is_active_leaf, + "Added seconded candidate {:?}", + candidate_hash + ); + added = true; + }, + Err(FragmentChainError::CandidateAlreadyKnown) => { + gum::debug!( + target: LOG_TARGET, + ?para, + ?relay_parent, + ?is_active_leaf, + "Attempting to introduce an already known candidate: {:?}", + candidate_hash + ); + added = true; + }, + Err(err) => { + gum::debug!( + target: LOG_TARGET, + ?para, + ?relay_parent, + ?candidate_hash, + ?is_active_leaf, + "Cannot introduce seconded candidate: {}", + err + ) + }, } } @@ -579,7 +583,7 @@ async fn handle_introduce_seconded_candidate( target: LOG_TARGET, para = ?para, candidate = ?candidate_hash, - "Newly-seconded candidate cannot be kept under any active leaf", + "Newly-seconded candidate cannot be kept under any relay parent", ); } @@ -596,47 +600,52 @@ async fn handle_candidate_backed( let mut found_candidate = false; let mut 
found_para = false; - for leaf in view.active_leaves.iter() { - let Some(leaf_data) = view.per_relay_parent.get_mut(leaf) else { continue }; - if let Some(chain) = leaf_data.fragment_chains.get_mut(¶) { - found_para = true; - if chain.is_candidate_backed(&candidate_hash) { - gum::debug!( - target: LOG_TARGET, - para_id = ?para, - ?candidate_hash, - "Received redundant instruction to mark as backed an already backed candidate", - ); - found_candidate = true; - } else if chain.contains_unconnected_candidate(&candidate_hash) { - found_candidate = true; - // Mark the candidate as backed. This can recreate the fragment chain. - chain.candidate_backed(&candidate_hash); + // We don't iterate only through the active leaves. We also update the deactivated parents in + // the implicit view, so that their upcoming children may see these candidates. + for (relay_parent, rp_data) in view.per_relay_parent.iter_mut() { + let Some(chain) = rp_data.fragment_chains.get_mut(¶) else { continue }; + let is_active_leaf = view.active_leaves.contains(relay_parent); - gum::trace!( - target: LOG_TARGET, - relay_parent = ?leaf, - para_id = ?para, - "Candidate backed. Candidate chain for para: {:?}", - chain.best_chain_vec() - ); + found_para = true; + if chain.is_candidate_backed(&candidate_hash) { + gum::debug!( + target: LOG_TARGET, + ?para, + ?candidate_hash, + ?is_active_leaf, + "Received redundant instruction to mark as backed an already backed candidate", + ); + found_candidate = true; + } else if chain.contains_unconnected_candidate(&candidate_hash) { + found_candidate = true; + // Mark the candidate as backed. This can recreate the fragment chain. 
+ chain.candidate_backed(&candidate_hash); - gum::trace!( - target: LOG_TARGET, - relay_parent = ?leaf, - para_id = ?para, - "Potential candidate storage for para: {:?}", - chain.unconnected().map(|candidate| candidate.hash()).collect::>() - ); - } + gum::trace!( + target: LOG_TARGET, + ?relay_parent, + ?para, + ?is_active_leaf, + "Candidate backed. Candidate chain for para: {:?}", + chain.best_chain_vec() + ); + + gum::trace!( + target: LOG_TARGET, + ?relay_parent, + ?para, + ?is_active_leaf, + "Potential candidate storage for para: {:?}", + chain.unconnected().map(|candidate| candidate.hash()).collect::>() + ); } } if !found_para { gum::warn!( target: LOG_TARGET, - para_id = ?para, + ?para, ?candidate_hash, "Received instruction to back a candidate for unscheduled para", ); @@ -649,7 +658,7 @@ async fn handle_candidate_backed( // dropped this other candidate already. gum::debug!( target: LOG_TARGET, - para_id = ?para, + ?para, ?candidate_hash, "Received instruction to back unknown candidate", ); diff --git a/polkadot/node/core/prospective-parachains/src/metrics.rs b/polkadot/node/core/prospective-parachains/src/metrics.rs index 4faeeee0a184..78561bc878ac 100644 --- a/polkadot/node/core/prospective-parachains/src/metrics.rs +++ b/polkadot/node/core/prospective-parachains/src/metrics.rs @@ -149,7 +149,7 @@ impl metrics::Metrics for Metrics { Gauge::new( "polkadot_parachain_prospective_parachains_implicit_view_candidate_count", "Number of candidates present in the implicit view" - )?, + )?, registry )?, }; diff --git a/polkadot/node/core/prospective-parachains/src/tests.rs b/polkadot/node/core/prospective-parachains/src/tests.rs index b3adc30471aa..926f1d141e05 100644 --- a/polkadot/node/core/prospective-parachains/src/tests.rs +++ b/polkadot/node/core/prospective-parachains/src/tests.rs @@ -2198,7 +2198,7 @@ fn handle_active_leaves_update_gets_candidates_from_parent() { let (candidate_d, candidate_hash_d) = make_and_back_candidate!(test_state, virtual_overseer, 
leaf_a, &candidate_c, 4); - let all_candidates_resp = vec![ + let mut all_candidates_resp = vec![ (candidate_hash_a, leaf_a.hash), (candidate_hash_b, leaf_a.hash), (candidate_hash_c, leaf_a.hash), @@ -2350,6 +2350,58 @@ fn handle_active_leaves_update_gets_candidates_from_parent() { ) .await; + // Deactivate C and add another candidate that will be present on the deactivated parent A. + // When activating C again it should also get the new candidate. Deactivated leaves are + // still updated with new candidates. + deactivate_leaf(&mut virtual_overseer, leaf_c.hash).await; + + let (candidate_e, _) = + make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_d, 5); + activate_leaf_with_parent_hash_fn(&mut virtual_overseer, &leaf_c, &test_state, |hash| { + if hash == leaf_c.hash { + leaf_a.hash + } else { + get_parent_hash(hash) + } + }) + .await; + + get_backable_candidates( + &mut virtual_overseer, + &leaf_b, + para_id, + [candidate_a.hash(), candidate_b.hash()].into_iter().collect(), + 5, + vec![ + (candidate_c.hash(), leaf_a.hash), + (candidate_d.hash(), leaf_a.hash), + (candidate_e.hash(), leaf_a.hash), + ], + ) + .await; + + all_candidates_resp.push((candidate_e.hash(), leaf_a.hash)); + get_backable_candidates( + &mut virtual_overseer, + &leaf_c, + para_id, + Ancestors::new(), + 5, + all_candidates_resp, + ) + .await; + + // Querying the backable candidates for deactivated leaf won't work. 
+ get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + para_id, + Ancestors::new(), + 5, + vec![], + ) + .await; + virtual_overseer }); From a9d7131e2316cb831acc1ee0746e20db4c0f2f06 Mon Sep 17 00:00:00 2001 From: alindima Date: Fri, 9 Aug 2024 19:03:15 +0300 Subject: [PATCH 56/56] add test for bounded implicit view --- .../core/prospective-parachains/src/tests.rs | 66 +++++++++++++++++++ 1 file changed, 66 insertions(+) diff --git a/polkadot/node/core/prospective-parachains/src/tests.rs b/polkadot/node/core/prospective-parachains/src/tests.rs index 926f1d141e05..14a093239e8e 100644 --- a/polkadot/node/core/prospective-parachains/src/tests.rs +++ b/polkadot/node/core/prospective-parachains/src/tests.rs @@ -2406,6 +2406,72 @@ fn handle_active_leaves_update_gets_candidates_from_parent() { }); assert_eq!(view.active_leaves.len(), 2); + assert_eq!(view.per_relay_parent.len(), 3); +} + +#[test] +fn handle_active_leaves_update_bounded_implicit_view() { + let para_id = ParaId::from(1); + let mut test_state = TestState::default(); + test_state.claim_queue = test_state + .claim_queue + .into_iter() + .filter(|(_, paras)| matches!(paras.front(), Some(para) if para == ¶_id)) + .collect(); + assert_eq!(test_state.claim_queue.len(), 1); + + let mut leaves = vec![TestLeaf { + number: 100, + hash: Hash::from_low_u64_be(130), + para_data: vec![( + para_id, + PerParaData::new(100 - ALLOWED_ANCESTRY_LEN, HeadData(vec![1, 2, 3])), + )], + }]; + + for index in 1..10 { + let prev_leaf = &leaves[index - 1]; + leaves.push(TestLeaf { + number: prev_leaf.number - 1, + hash: get_parent_hash(prev_leaf.hash), + para_data: vec![( + para_id, + PerParaData::new( + prev_leaf.number - 1 - ALLOWED_ANCESTRY_LEN, + HeadData(vec![1, 2, 3]), + ), + )], + }); + } + leaves.reverse(); + + let view = test_harness(|mut virtual_overseer| async { + // Activate first 10 leaves. 
+ for leaf in &leaves[0..10] { + activate_leaf(&mut virtual_overseer, leaf, &test_state).await; + } + + // Now deactivate first 9 leaves. + for leaf in &leaves[0..9] { + deactivate_leaf(&mut virtual_overseer, leaf.hash).await; + } + + virtual_overseer + }); + + // Only latest leaf is active. + assert_eq!(view.active_leaves.len(), 1); + // We keep allowed_ancestry_len implicit leaves. The latest leaf is also present here. + assert_eq!( + view.per_relay_parent.len() as u32, + ASYNC_BACKING_PARAMETERS.allowed_ancestry_len + 1 + ); + + assert_eq!(view.active_leaves, [leaves[9].hash].into_iter().collect()); + assert_eq!( + view.per_relay_parent.into_keys().collect::>(), + leaves[6..].into_iter().map(|l| l.hash).collect::>() + ); } #[test]