diff --git a/Cargo.lock b/Cargo.lock
index 8e2551f7cc9bc..66b55a83a0cf5 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -12568,6 +12568,7 @@ dependencies = [
  "polkadot-node-subsystem-util",
  "polkadot-primitives",
  "polkadot-primitives-test-helpers",
+ "rstest",
  "sp-application-crypto",
  "sp-keystore",
  "thiserror",
@@ -14642,6 +14643,12 @@ version = "0.8.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f"
 
+[[package]]
+name = "relative-path"
+version = "1.9.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e898588f33fdd5b9420719948f9f2a32c922a246964576f71ba7f24f80610fbc"
+
 [[package]]
 name = "remote-ext-tests-bags-list"
 version = "1.0.0"
@@ -15024,6 +15031,35 @@ dependencies = [
  "winapi",
 ]
 
+[[package]]
+name = "rstest"
+version = "0.18.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "97eeab2f3c0a199bc4be135c36c924b6590b88c377d416494288c14f2db30199"
+dependencies = [
+ "futures",
+ "futures-timer",
+ "rstest_macros",
+ "rustc_version 0.4.0",
+]
+
+[[package]]
+name = "rstest_macros"
+version = "0.18.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d428f8247852f894ee1be110b375111b586d4fa431f6c46e64ba5a0dcccbe605"
+dependencies = [
+ "cfg-if",
+ "glob",
+ "proc-macro2",
+ "quote",
+ "regex",
+ "relative-path",
+ "rustc_version 0.4.0",
+ "syn 2.0.48",
+ "unicode-ident",
+]
+
 [[package]]
 name = "rtnetlink"
 version = "0.10.1"
diff --git a/polkadot/node/core/prospective-parachains/src/fragment_tree.rs b/polkadot/node/core/prospective-parachains/src/fragment_tree.rs
index 04ee42a9de062..a63c9aa723d10 100644
--- a/polkadot/node/core/prospective-parachains/src/fragment_tree.rs
+++ b/polkadot/node/core/prospective-parachains/src/fragment_tree.rs
@@ -756,37 +756,31 @@ impl FragmentTree {
 depths.iter_ones().collect()
 }
 
- /// Select `count` candidates after the given `required_path` which pass
+ /// Select `count` candidates after the given `required_ancestors` which pass
 /// the predicate and have not already been backed on chain.
 ///
- /// Does an exhaustive search into the tree starting after `required_path`.
- /// If there are multiple possibilities of size `count`, this will select the first one.
- /// If there is no chain of size `count` that matches the criteria, this will return the largest
- /// chain it could find with the criteria.
+ /// Does an exhaustive search into the tree starting after `required_ancestors`.
+ /// If there are multiple possibilities of size `count`, this will select the first one.
+ /// If there is no chain of size `count` that matches the criteria, this will return the
+ /// largest chain it could find with the criteria.
 /// If there are no candidates meeting those criteria, returns an empty `Vec`.
 /// Cycles are accepted, see module docs for the `Cycles` section.
 ///
- /// The intention of the `required_path` is to allow queries on the basis of
+ /// The intention of the `required_ancestors` is to allow queries on the basis of
 /// one or more candidates which were previously pending availability becoming
- /// available and opening up more room on the core.
+ /// available and opening up more room on the cores.
+ ///
+ /// `required_ancestors` need not be ordered; this function takes care of finding a valid
+ /// path from the root that includes all of the given ancestors.
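// Illustrative sketch, not part of the patch: a simplified, self-contained model of the
// unordered-ancestor traversal that `select_children` now performs via
// `find_path_from_candidates`. The `Hash` alias, the `children` adjacency map and `find_path`
// are names invented for this example; the real implementation walks `NodePointer`s in the
// fragment tree, but it handles duplicates (cycles) the same way, via occurrence counts, and
// is similarly greedy about which matching child it follows.
use std::collections::HashMap;

type Hash = u64;

// Walk down from `root`, at each step following a child whose hash still has a remaining
// occurrence among the required ancestors. Succeeds only if every required ancestor was used,
// regardless of the order in which the ancestors were supplied.
fn find_path(
	children: &HashMap<Hash, Vec<Hash>>,
	root: Hash,
	required_ancestors: &[Hash],
) -> Option<Hash> {
	// Duplicates are allowed, so track occurrence counts rather than a plain set.
	let mut remaining: HashMap<Hash, usize> = HashMap::new();
	for hash in required_ancestors {
		*remaining.entry(*hash).or_insert(0) += 1;
	}
	let mut left = required_ancestors.len();
	let mut node = root;

	while let Some(next) = children.get(&node).and_then(|kids| {
		kids.iter().copied().find(|kid| remaining.get(kid).map_or(false, |count| *count > 0))
	}) {
		*remaining.get_mut(&next).expect("found above; qed") -= 1;
		left -= 1;
		node = next;
	}

	// Every required ancestor must have been consumed along a single downward path.
	(left == 0).then_some(node)
}

fn main() {
	// root(0) -> 1 -> 2 -> 3
	let children: HashMap<Hash, Vec<Hash>> =
		[(0, vec![1]), (1, vec![2]), (2, vec![3])].into_iter().collect();
	// Ancestors supplied out of order still resolve to the path 0 -> 1 -> 2.
	assert_eq!(find_path(&children, 0, &[2, 1]), Some(2));
	// 1 and 3 do not lie on a single path that omits 2, so the query yields nothing.
	assert_eq!(find_path(&children, 0, &[3, 1]), None);
}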
pub(crate) fn select_children( &self, - required_path: &[CandidateHash], + required_ancestors: &[CandidateHash], count: u32, pred: impl Fn(&CandidateHash) -> bool, ) -> Vec { - let base_node = { - // traverse the required path. - let mut node = NodePointer::Root; - for required_step in required_path { - if let Some(next_node) = self.node_candidate_child(node, &required_step) { - node = next_node; - } else { - return vec![] - }; - } - - node + // First, we need to order the required_ancestors. + let Some(base_node) = self.find_path_from_candidates(required_ancestors) else { + return vec![] }; // TODO: taking the first best selection might introduce bias @@ -879,6 +873,66 @@ impl FragmentTree { best_result } + // Orders the ancestors into a viable path from root to the last one. + // Returns the pointer to the last node in the path. + fn find_path_from_candidates( + &self, + required_ancestors: &[CandidateHash], + ) -> Option { + let mut n_required_ancestors = required_ancestors.len(); + // We may have duplicates, which are permitted. Use a HashMap to keep a record of ancestors + // and their occurrence count. + let mut ancestors_map: HashMap = required_ancestors.iter().fold( + HashMap::with_capacity(n_required_ancestors), + |mut acc, candidate| { + acc.entry(*candidate).and_modify(|c| *c += 1).or_insert(1); + acc + }, + ); + + let mut last_node = NodePointer::Root; + let mut next_node: Option = Some(NodePointer::Root); + + while let Some(node) = next_node { + last_node = node; + + next_node = match node { + NodePointer::Root => self + .nodes + .iter() + .enumerate() + .take_while(|n| n.1.parent == NodePointer::Root) + .find_map(|(i, n)| match ancestors_map.get_mut(&n.candidate_hash) { + Some(count) if *count > 0 => { + *count -= 1; + n_required_ancestors -= 1; + Some(NodePointer::Storage(i)) + }, + _ => None, + }), + NodePointer::Storage(ptr) => self.nodes.get(ptr).and_then(|n| { + n.children.iter().find_map(|(node_ptr, hash)| { + match ancestors_map.get_mut(hash) { + Some(count) if *count > 0 => { + *count -= 1; + n_required_ancestors -= 1; + Some(*node_ptr) + }, + _ => None, + } + }) + }), + }; + } + + if n_required_ancestors != 0 { + // Could not traverse the path. We should have used every ancestor. + None + } else { + Some(last_node) + } + } + fn populate_from_bases(&mut self, storage: &CandidateStorage, initial_bases: Vec) { // Populate the tree breadth-first. let mut last_sweep_start = None; @@ -1614,6 +1668,30 @@ mod tests { .collect::>() ); } + assert_eq!( + tree.select_children( + &iter::repeat(candidate_a_hash).take(2).collect::>(), + 2, + |_| true + ), + vec![candidate_a_hash, candidate_a_hash] + ); + assert_eq!( + tree.select_children( + &iter::repeat(candidate_a_hash).take(3).collect::>(), + 3, + |_| true + ), + vec![candidate_a_hash, candidate_a_hash] + ); + assert_eq!( + tree.select_children( + &iter::repeat(candidate_a_hash).take(max_depth + 1).collect::>(), + 5, + |_| true + ), + vec![] + ); } #[test] @@ -1710,6 +1788,38 @@ mod tests { tree.select_children(&[candidate_a_hash, candidate_b_hash], 6, |_| true), vec![candidate_a_hash, candidate_b_hash, candidate_a_hash,], ); + assert_eq!( + tree.select_children(&[candidate_b_hash, candidate_a_hash], 6, |_| true), + vec![candidate_a_hash, candidate_b_hash, candidate_a_hash,], + ); + + // Unordered ancestors. + for count in 1..5 { + assert_eq!( + tree.select_children( + &[candidate_b_hash, candidate_a_hash, candidate_b_hash, candidate_a_hash], + count, + |_| true + ), + vec![candidate_a_hash] + ); + } + + // Invalid ancestors. 
a->b->a->b->b. + assert_eq!( + tree.select_children( + &[ + candidate_b_hash, + candidate_a_hash, + candidate_b_hash, + candidate_a_hash, + candidate_b_hash + ], + 1, + |_| true + ), + vec![] + ); } #[test] diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index 6e6915b92728c..390a56b826d36 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -150,14 +150,14 @@ async fn run_iteration( relay_parent, para, count, - required_path, + required_ancestors, tx, ) => answer_get_backable_candidates( &view, relay_parent, para, count, - required_path, + required_ancestors, tx, ), ProspectiveParachainsMessage::GetHypotheticalFrontier(request, tx) => @@ -565,7 +565,7 @@ fn answer_get_backable_candidates( relay_parent: Hash, para: ParaId, count: u32, - required_path: Vec, + required_ancestors: Vec, tx: oneshot::Sender>, ) { let data = match view.active_leaves.get(&relay_parent) { @@ -614,7 +614,7 @@ fn answer_get_backable_candidates( }; let backable_candidates: Vec<_> = tree - .select_children(&required_path, count, |candidate| storage.is_backed(candidate)) + .select_children(&required_ancestors, count, |candidate| storage.is_backed(candidate)) .into_iter() .filter_map(|child_hash| { storage.relay_parent_by_candidate_hash(&child_hash).map_or_else( @@ -635,7 +635,7 @@ fn answer_get_backable_candidates( if backable_candidates.is_empty() { gum::trace!( target: LOG_TARGET, - ?required_path, + ?required_ancestors, para_id = ?para, %relay_parent, "Could not find any backable candidate", diff --git a/polkadot/node/core/prospective-parachains/src/tests.rs b/polkadot/node/core/prospective-parachains/src/tests.rs index 732736b101de0..4f5f7888e7a63 100644 --- a/polkadot/node/core/prospective-parachains/src/tests.rs +++ b/polkadot/node/core/prospective-parachains/src/tests.rs @@ -407,7 +407,7 @@ async fn get_backable_candidates( virtual_overseer: &mut VirtualOverseer, leaf: &TestLeaf, para_id: ParaId, - required_path: Vec, + required_ancestors: Vec, count: u32, expected_result: Vec<(CandidateHash, Hash)>, ) { @@ -418,7 +418,7 @@ async fn get_backable_candidates( leaf.hash, para_id, count, - required_path, + required_ancestors, tx, ), }) @@ -1172,42 +1172,63 @@ fn check_backable_query_multiple_candidates() { } } - // required path of 2 + // required path of 2 and higher { + // required_ancestors does not need to be ordered. get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a, candidate_hash_b], + vec![candidate_hash_a, candidate_hash_i, candidate_hash_h, candidate_hash_c], 1, - vec![(candidate_hash_d, leaf_a.hash)], + vec![(candidate_hash_j, leaf_a.hash)], ) .await; - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), + + for combination in vec![ + vec![candidate_hash_a, candidate_hash_b], + vec![candidate_hash_b, candidate_hash_a], + ] { + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + combination, + 1, + vec![(candidate_hash_d, leaf_a.hash)], + ) + .await; + } + for combination in vec![ vec![candidate_hash_a, candidate_hash_c], - 1, - vec![(candidate_hash_h, leaf_a.hash)], - ) - .await; - // If the requested count exceeds the largest chain, return the longest - // chain we can get. 
- for count in 4..10 { + vec![candidate_hash_c, candidate_hash_a], + ] { get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a, candidate_hash_c], - count, - vec![ - (candidate_hash_h, leaf_a.hash), - (candidate_hash_i, leaf_a.hash), - (candidate_hash_j, leaf_a.hash), - ], + combination.clone(), + 1, + vec![(candidate_hash_h, leaf_a.hash)], ) .await; + // If the requested count exceeds the largest chain, return the longest + // chain we can get. + for count in 4..10 { + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + combination.clone(), + count, + vec![ + (candidate_hash_h, leaf_a.hash), + (candidate_hash_i, leaf_a.hash), + (candidate_hash_j, leaf_a.hash), + ], + ) + .await; + } } } @@ -1215,6 +1236,7 @@ fn check_backable_query_multiple_candidates() { { let required_paths = vec![ vec![candidate_hash_a, candidate_hash_b, candidate_hash_e], + vec![candidate_hash_b, candidate_hash_a, candidate_hash_e], vec![ candidate_hash_a, candidate_hash_c, @@ -1222,6 +1244,13 @@ fn check_backable_query_multiple_candidates() { candidate_hash_i, candidate_hash_j, ], + vec![ + candidate_hash_a, + candidate_hash_c, + candidate_hash_h, + candidate_hash_j, + candidate_hash_i, + ], ]; for path in required_paths { for count in 1..4 { @@ -1238,7 +1267,7 @@ fn check_backable_query_multiple_candidates() { } } - // Should not get anything at the wrong path. + // Should not get anything at the wrong paths. get_backable_candidates( &mut virtual_overseer, &leaf_a, @@ -1252,7 +1281,7 @@ fn check_backable_query_multiple_candidates() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_b, candidate_hash_a], + vec![candidate_hash_b, candidate_hash_f], 3, vec![], ) @@ -1261,8 +1290,17 @@ fn check_backable_query_multiple_candidates() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a, candidate_hash_b, candidate_hash_c], - 3, + vec![candidate_hash_a, candidate_hash_h], + 2, + vec![], + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + vec![candidate_hash_e, candidate_hash_h], + 2, vec![], ) .await; @@ -1282,7 +1320,7 @@ fn check_backable_query_multiple_candidates() { // (imaginary root) // | | // +----B---+ A - // | | | | + // | | | | // | | | C // D E F | // | H @@ -1449,42 +1487,55 @@ fn check_backable_query_multiple_candidates() { } } - // required path of 2 + // required path of 2 or higher { - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), + // required_ancestors does not need to be ordered. + + for combination in vec![ vec![candidate_hash_b, candidate_hash_f], - 1, - vec![(candidate_hash_g, leaf_a.hash)], - ) - .await; - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), + vec![candidate_hash_f, candidate_hash_b], + ] { + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + combination, + 1, + vec![(candidate_hash_g, leaf_a.hash)], + ) + .await; + } + + for combination in vec![ vec![candidate_hash_a, candidate_hash_c], - 1, - vec![(candidate_hash_h, leaf_a.hash)], - ) - .await; - // If the requested count exceeds the largest chain, return the longest - // chain we can get. 
- for count in 4..10 { + vec![candidate_hash_c, candidate_hash_a], + ] { get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a, candidate_hash_c], - count, - vec![ - (candidate_hash_h, leaf_a.hash), - (candidate_hash_i, leaf_a.hash), - (candidate_hash_j, leaf_a.hash), - ], + combination.clone(), + 1, + vec![(candidate_hash_h, leaf_a.hash)], ) .await; + // If the requested count exceeds the largest chain, return the longest + // chain we can get. + for count in 4..10 { + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + combination.clone(), + count, + vec![ + (candidate_hash_h, leaf_a.hash), + (candidate_hash_i, leaf_a.hash), + (candidate_hash_j, leaf_a.hash), + ], + ) + .await; + } } } @@ -1493,6 +1544,8 @@ fn check_backable_query_multiple_candidates() { let required_paths = vec![ vec![candidate_hash_b, candidate_hash_f, candidate_hash_g], vec![candidate_hash_b, candidate_hash_e], + vec![candidate_hash_d], + vec![candidate_hash_e, candidate_hash_b], vec![candidate_hash_b, candidate_hash_d], vec![ candidate_hash_a, @@ -1501,6 +1554,13 @@ fn check_backable_query_multiple_candidates() { candidate_hash_i, candidate_hash_j, ], + vec![ + candidate_hash_c, + candidate_hash_a, + candidate_hash_h, + candidate_hash_j, + candidate_hash_i, + ], ]; for path in required_paths { for count in 1..4 { @@ -1522,7 +1582,7 @@ fn check_backable_query_multiple_candidates() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_d], + vec![candidate_hash_h], 1, vec![], ) @@ -1536,6 +1596,34 @@ fn check_backable_query_multiple_candidates() { vec![], ) .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + vec![candidate_hash_a, candidate_hash_b], + 3, + vec![], + ) + .await; + // Non-existent candidate. 
+ get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + vec![candidate_hash_a, CandidateHash(Hash::from_low_u64_be(100))], + 3, + vec![], + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + vec![candidate_hash_a, candidate_hash_a], + 3, + vec![], + ) + .await; get_backable_candidates( &mut virtual_overseer, &leaf_a, diff --git a/polkadot/node/core/provisioner/Cargo.toml b/polkadot/node/core/provisioner/Cargo.toml index 175980e90a057..12c0c9913c804 100644 --- a/polkadot/node/core/provisioner/Cargo.toml +++ b/polkadot/node/core/provisioner/Cargo.toml @@ -26,3 +26,4 @@ sp-application-crypto = { path = "../../../../substrate/primitives/application-c sp-keystore = { path = "../../../../substrate/primitives/keystore" } polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../../primitives/test-helpers" } +rstest = "0.18.2" diff --git a/polkadot/node/core/provisioner/src/error.rs b/polkadot/node/core/provisioner/src/error.rs index 376d69f276fc8..afab26c60e4c3 100644 --- a/polkadot/node/core/provisioner/src/error.rs +++ b/polkadot/node/core/provisioner/src/error.rs @@ -50,8 +50,8 @@ pub enum Error { #[error("failed to get votes on dispute")] CanceledCandidateVotes(#[source] oneshot::Canceled), - #[error("failed to get backable candidate from prospective parachains")] - CanceledBackableCandidate(#[source] oneshot::Canceled), + #[error("failed to get backable candidates from prospective parachains")] + CanceledBackableCandidates(#[source] oneshot::Canceled), #[error(transparent)] ChainApi(#[from] ChainApiError), diff --git a/polkadot/node/core/provisioner/src/lib.rs b/polkadot/node/core/provisioner/src/lib.rs index 51f768d782e0b..054cfc71b19a8 100644 --- a/polkadot/node/core/provisioner/src/lib.rs +++ b/polkadot/node/core/provisioner/src/lib.rs @@ -43,7 +43,7 @@ use polkadot_primitives::{ BackedCandidate, BlockNumber, CandidateHash, CandidateReceipt, CoreState, Hash, Id as ParaId, OccupiedCoreAssumption, SignedAvailabilityBitfield, ValidatorIndex, }; -use std::collections::{BTreeMap, HashMap}; +use std::collections::{BTreeMap, HashMap, HashSet}; mod disputes; mod error; @@ -566,6 +566,9 @@ async fn select_candidate_hashes_from_tracked( "Candidate receipts (before selection)", ); + // Used to track already scheduled paras. + let mut scheduled_paras = HashSet::with_capacity(candidates.len()); + for (core_idx, core) in availability_cores.iter().enumerate() { let (scheduled_core, assumption) = match core { CoreState::Scheduled(scheduled_core) => (scheduled_core, OccupiedCoreAssumption::Free), @@ -579,6 +582,9 @@ async fn select_candidate_hashes_from_tracked( } } else { if occupied_core.time_out_at != block_number { + // Record that this para is already occupying a core. + scheduled_paras.insert(occupied_core.para_id()); + continue } if let Some(ref scheduled_core) = occupied_core.next_up_on_time_out { @@ -591,6 +597,12 @@ async fn select_candidate_hashes_from_tracked( CoreState::Free => continue, }; + // We've already selected one candidate for this para. Elastic scaling only works if + // prospective-parachains are enabled. 
+ if scheduled_paras.contains(&scheduled_core.para_id) { + continue + } + let validation_data = match request_persisted_validation_data( relay_parent, scheduled_core.para_id, @@ -624,6 +636,7 @@ async fn select_candidate_hashes_from_tracked( "Selected candidate receipt", ); + scheduled_paras.insert(scheduled_core.para_id); selected_candidates.push((candidate_hash, candidate.descriptor.relay_parent)); } } @@ -643,57 +656,100 @@ async fn request_backable_candidates( ) -> Result, Error> { let block_number = get_block_number_under_construction(relay_parent, sender).await?; - let mut selected_candidates = Vec::with_capacity(availability_cores.len()); + // The required ancestors for a para are not sorted. This information is useless if the para has + // been timed out. + let mut required_ancestors: HashMap> = HashMap::new(); + + let mut newly_available_cores = HashSet::new(); + // We first need to record which candidates timed out while pending availability. + // Any one candidate that is timed out will empty all of this para's cores. + let mut timed_out_paras = HashSet::new(); for (core_idx, core) in availability_cores.iter().enumerate() { - let (para_id, required_path) = match core { + match core { + CoreState::Occupied(occupied_core) => { + required_ancestors + .entry(occupied_core.para_id()) + .or_default() + .push(occupied_core.candidate_hash); + + if timed_out_paras.contains(&occupied_core.para_id()) || + bitfields_indicate_availability( + core_idx, + bitfields, + &occupied_core.availability, + ) { + newly_available_cores.insert(core_idx); + } else if occupied_core.time_out_at <= block_number { + newly_available_cores.insert(core_idx); + timed_out_paras.insert(occupied_core.para_id()); + } + }, + CoreState::Scheduled(_) => { + newly_available_cores.insert(core_idx); + }, + CoreState::Free => continue, + }; + } + + // Record how many candidates we'll need to request for each para id. Use a BTreeMap because + // we'll need to iterate through them and it's more efficient for that. + let mut requested_counts: BTreeMap = BTreeMap::new(); + + for core_idx in newly_available_cores { + let core = &availability_cores[core_idx]; + + match core { CoreState::Scheduled(scheduled_core) => { - // The core is free, pick the first eligible candidate from - // the fragment tree. - (scheduled_core.para_id, Vec::new()) + requested_counts + .entry(scheduled_core.para_id) + .and_modify(|c| *c += 1) + .or_insert(1); }, CoreState::Occupied(occupied_core) => { - if bitfields_indicate_availability(core_idx, bitfields, &occupied_core.availability) - { - if let Some(ref scheduled_core) = occupied_core.next_up_on_available { - // The candidate occupying the core is available, choose its - // child in the fragment tree. - // - // TODO: doesn't work for on-demand parachains. We lean hard on the - // assumption that cores are fixed to specific parachains within a session. - // https://github.com/paritytech/polkadot/issues/5492 - (scheduled_core.para_id, vec![occupied_core.candidate_hash]) - } else { - continue + if timed_out_paras.contains(&occupied_core.para_id()) { + if let Some(ref scheduled_core) = occupied_core.next_up_on_time_out { + requested_counts + .entry(scheduled_core.para_id) + .and_modify(|c| *c += 1) + .or_insert(1); } } else { - if occupied_core.time_out_at != block_number { - continue - } - if let Some(ref scheduled_core) = occupied_core.next_up_on_time_out { - // Candidate's availability timed out, practically same as scheduled. 
- (scheduled_core.para_id, Vec::new()) - } else { - continue + if let Some(ref scheduled_core) = occupied_core.next_up_on_available { + requested_counts + .entry(scheduled_core.para_id) + .and_modify(|c| *c += 1) + .or_insert(1); } } }, CoreState::Free => continue, - }; + } + } + + let mut selected_candidates = Vec::with_capacity(availability_cores.len()); - let response = get_backable_candidate(relay_parent, para_id, required_path, sender).await?; + for (para_id, count) in requested_counts { + let required_ancestors = if timed_out_paras.contains(¶_id) { + vec![] + } else { + required_ancestors.remove(¶_id).unwrap_or_default() + }; + // Prospective-parachains will order the required_ancestors for us. + let response = + get_backable_candidates(relay_parent, para_id, required_ancestors, count, sender) + .await?; - match response { - Some((hash, relay_parent)) => selected_candidates.push((hash, relay_parent)), - None => { - gum::debug!( - target: LOG_TARGET, - leaf_hash = ?relay_parent, - core = core_idx, - "No backable candidate returned by prospective parachains", - ); - }, + if response.is_empty() { + gum::debug!( + target: LOG_TARGET, + leaf_hash = ?relay_parent, + ?para_id, + "No backable candidate returned by prospective parachains", + ); } + + selected_candidates.extend(response.into_iter()); } Ok(selected_candidates) @@ -796,28 +852,27 @@ async fn get_block_number_under_construction( } } -/// Requests backable candidate from Prospective Parachains based on -/// the given path in the fragment tree. -async fn get_backable_candidate( +/// Requests backable candidates from Prospective Parachains based on +/// the given ancestors in the fragment tree. The ancestors may not be ordered. +async fn get_backable_candidates( relay_parent: Hash, para_id: ParaId, - required_path: Vec, + required_ancestors: Vec, + count: u32, sender: &mut impl overseer::ProvisionerSenderTrait, -) -> Result, Error> { +) -> Result, Error> { let (tx, rx) = oneshot::channel(); sender .send_message(ProspectiveParachainsMessage::GetBackableCandidates( relay_parent, para_id, - 1, // core count hardcoded to 1, until elastic scaling is implemented and enabled. 
- required_path, + count, + required_ancestors, tx, )) .await; - rx.await - .map_err(Error::CanceledBackableCandidate) - .map(|res| res.get(0).copied()) + rx.await.map_err(Error::CanceledBackableCandidates) } /// The availability bitfield for a given core is the transpose diff --git a/polkadot/node/core/provisioner/src/tests.rs b/polkadot/node/core/provisioner/src/tests.rs index b26df8ddb9104..aaced3cc61194 100644 --- a/polkadot/node/core/provisioner/src/tests.rs +++ b/polkadot/node/core/provisioner/src/tests.rs @@ -22,6 +22,9 @@ use polkadot_primitives::{OccupiedCore, ScheduledCore}; const MOCK_GROUP_SIZE: usize = 5; pub fn occupied_core(para_id: u32) -> CoreState { + let mut candidate_descriptor = dummy_candidate_descriptor(dummy_hash()); + candidate_descriptor.para_id = para_id.into(); + CoreState::Occupied(OccupiedCore { group_responsible: para_id.into(), next_up_on_available: None, @@ -29,7 +32,7 @@ pub fn occupied_core(para_id: u32) -> CoreState { time_out_at: 200_u32, next_up_on_time_out: None, availability: bitvec![u8, bitvec::order::Lsb0; 0; 32], - candidate_descriptor: dummy_candidate_descriptor(dummy_hash()), + candidate_descriptor, candidate_hash: Default::default(), }) } @@ -254,10 +257,11 @@ mod select_candidates { use polkadot_primitives::{ BlockNumber, CandidateCommitments, CommittedCandidateReceipt, PersistedValidationData, }; + use rstest::rstest; const BLOCK_UNDER_PRODUCTION: BlockNumber = 128; - // For test purposes, we always return this set of availability cores: + // For testing only one core assigned to a parachain, we return this set of availability cores: // // [ // 0: Free, @@ -273,7 +277,7 @@ mod select_candidates { // 10: Occupied(both next_up set, not available, timeout), // 11: Occupied(next_up_on_available and available, but different successor para_id) // ] - fn mock_availability_cores() -> Vec { + fn mock_availability_cores_one_per_para() -> Vec { use std::ops::Not; use CoreState::{Free, Scheduled}; @@ -333,9 +337,177 @@ mod select_candidates { ] } + // For test purposes with multiple possible cores assigned to a para, we always return this set + // of availability cores: + // + // [ + // 0: Free, + // 1: Scheduled(default), + // 2: Occupied(no next_up set), + // 3: Occupied(next_up_on_available set but not available), + // 4: Occupied(next_up_on_available set and available), + // 5: Occupied(next_up_on_time_out set but not timeout), + // 6: Occupied(next_up_on_time_out set and timeout but available), + // 7: Occupied(next_up_on_time_out set and timeout and not available), + // 8: Occupied(both next_up set, available), + // 9: Occupied(both next_up set, not available, no timeout), + // 10: Occupied(both next_up set, not available, timeout), + // 11: Occupied(next_up_on_available and available, but different successor para_id) + // ] + fn mock_availability_cores_multiple_per_para() -> Vec { + use std::ops::Not; + use CoreState::{Free, Scheduled}; + + vec![ + // 0: Free, + Free, + // 1: Scheduled(default), + Scheduled(scheduled_core(1)), + // 2: Occupied(no next_up set), + occupied_core(2), + // 3: Occupied(next_up_on_available set but not available), + build_occupied_core(3, |core| { + core.next_up_on_available = Some(scheduled_core(3)); + }), + // 4: Occupied(next_up_on_available set and available), + build_occupied_core(4, |core| { + core.next_up_on_available = Some(scheduled_core(4)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(41)); + }), + // 5: Occupied(next_up_on_time_out set but 
not timeout), + build_occupied_core(5, |core| { + core.next_up_on_time_out = Some(scheduled_core(5)); + }), + // 6: Occupied(next_up_on_time_out set and timeout but available), + build_occupied_core(6, |core| { + core.next_up_on_time_out = Some(scheduled_core(6)); + core.time_out_at = BLOCK_UNDER_PRODUCTION; + core.availability = core.availability.clone().not(); + }), + // 7: Occupied(next_up_on_time_out set and timeout and not available), + build_occupied_core(7, |core| { + core.next_up_on_time_out = Some(scheduled_core(7)); + core.time_out_at = BLOCK_UNDER_PRODUCTION; + }), + // 8: Occupied(both next_up set, available), + build_occupied_core(8, |core| { + core.next_up_on_available = Some(scheduled_core(8)); + core.next_up_on_time_out = Some(scheduled_core(8)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(81)); + }), + // 9: Occupied(both next_up set, not available, no timeout), + build_occupied_core(9, |core| { + core.next_up_on_available = Some(scheduled_core(9)); + core.next_up_on_time_out = Some(scheduled_core(9)); + }), + // 10: Occupied(both next_up set, not available, timeout), + build_occupied_core(10, |core| { + core.next_up_on_available = Some(scheduled_core(10)); + core.next_up_on_time_out = Some(scheduled_core(10)); + core.time_out_at = BLOCK_UNDER_PRODUCTION; + }), + // 11: Occupied(next_up_on_available and available, but different successor para_id) + build_occupied_core(11, |core| { + core.next_up_on_available = Some(scheduled_core(12)); + core.availability = core.availability.clone().not(); + }), + // 12-14: Occupied(next_up_on_available and available, same para_id). + build_occupied_core(12, |core| { + core.next_up_on_available = Some(scheduled_core(12)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(121)); + }), + build_occupied_core(12, |core| { + core.next_up_on_available = Some(scheduled_core(12)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(122)); + }), + build_occupied_core(12, |core| { + core.next_up_on_available = Some(scheduled_core(12)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(121)); + }), + // 15: Scheduled on same para_id as 12-14. 
+ Scheduled(scheduled_core(12)), + // 16: Occupied(13, no next_up set, not available) + build_occupied_core(13, |core| { + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(131)); + }), + // 17: Occupied(13, no next_up set, available) + build_occupied_core(13, |core| { + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(132)); + }), + // 18: Occupied(13, next_up_on_available set to 13 but not available) + build_occupied_core(13, |core| { + core.next_up_on_available = Some(scheduled_core(13)); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(133)); + }), + // 19: Occupied(13, next_up_on_available set to 13 and available) + build_occupied_core(13, |core| { + core.next_up_on_available = Some(scheduled_core(13)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(134)); + }), + // 20: Occupied(13, next_up_on_time_out set to 13 but not timeout) + build_occupied_core(13, |core| { + core.next_up_on_time_out = Some(scheduled_core(13)); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(135)); + }), + // 21: Occupied(13, next_up_on_available set to 14 and available) + build_occupied_core(13, |core| { + core.next_up_on_available = Some(scheduled_core(14)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(136)); + }), + // 22: Occupied(13, next_up_on_available set to 14 but not available) + build_occupied_core(13, |core| { + core.next_up_on_available = Some(scheduled_core(14)); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(137)); + }), + // 23: Occupied(13, both next_up set to 14, available) + build_occupied_core(13, |core| { + core.next_up_on_available = Some(scheduled_core(14)); + core.next_up_on_time_out = Some(scheduled_core(14)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(138)); + }), + // 25: Occupied(13, next_up_on_available and available, but successor para_id 15) + build_occupied_core(13, |core| { + core.next_up_on_available = Some(scheduled_core(15)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(139)); + }), + // 26: Occupied(15, next_up_on_available and available, but successor para_id 13) + build_occupied_core(15, |core| { + core.next_up_on_available = Some(scheduled_core(13)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(151)); + }), + // 24: Occupied(13, both next_up set to 14, not available, timeout) + // build_occupied_core(13, |core| { + // core.next_up_on_available = Some(scheduled_core(14)); + // core.next_up_on_time_out = Some(scheduled_core(14)); + // core.time_out_at = BLOCK_UNDER_PRODUCTION; + // }), + + // 27: Occupied(15, both next_up, both available and timed out) + // build_occupied_core(15, |core| { + // core.next_up_on_available = Some(scheduled_core(15)); + // core.availability = core.availability.clone().not(); + // core.candidate_hash = CandidateHash(Hash::from_low_u64_be(152)); + // core.time_out_at = BLOCK_UNDER_PRODUCTION; + // }), + ] + } + async fn mock_overseer( mut receiver: mpsc::UnboundedReceiver, + mock_availability_cores: Vec, expected: Vec, + mut required_ancestors: HashMap, Vec>, prospective_parachains_mode: ProspectiveParachainsMode, ) { use ChainApiMessage::BlockNumber; @@ -356,7 +528,7 @@ mod select_candidates { 
PersistedValidationDataReq(_para_id, _assumption, tx), )) => tx.send(Ok(Some(Default::default()))).unwrap(), AllMessages::RuntimeApi(Request(_parent_hash, AvailabilityCores(tx))) => - tx.send(Ok(mock_availability_cores())).unwrap(), + tx.send(Ok(mock_availability_cores.clone())).unwrap(), AllMessages::CandidateBacking(CandidateBackingMessage::GetBackedCandidates( hashes, sender, @@ -373,30 +545,63 @@ mod select_candidates { let _ = sender.send(response); }, AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::GetBackableCandidates(_, _, count, _, tx), - ) => { - assert_eq!(count, 1); - - match prospective_parachains_mode { - ProspectiveParachainsMode::Enabled { .. } => { - let _ = - tx.send(candidates_iter.next().map_or_else(Vec::new, |c| vec![c])); - }, - ProspectiveParachainsMode::Disabled => - panic!("unexpected prospective parachains request"), - } + ProspectiveParachainsMessage::GetBackableCandidates( + _, + _para_id, + count, + actual_required_ancestors, + tx, + ), + ) => match prospective_parachains_mode { + ProspectiveParachainsMode::Enabled { .. } => { + assert!(count > 0); + let candidates = + (&mut candidates_iter).take(count as usize).collect::>(); + assert_eq!(candidates.len(), count as usize); + + if let Some(expected_required_ancestors) = required_ancestors.remove( + &(candidates + .clone() + .into_iter() + .take(actual_required_ancestors.len()) + .map(|(c_hash, _)| c_hash) + .collect::>()), + ) { + assert_eq!(expected_required_ancestors, actual_required_ancestors); + } else { + assert_eq!(actual_required_ancestors.len(), 0); + } + + let _ = tx.send(candidates); + }, + ProspectiveParachainsMode::Disabled => + panic!("unexpected prospective parachains request"), }, _ => panic!("Unexpected message: {:?}", from_job), } } + + if let ProspectiveParachainsMode::Enabled { .. } = prospective_parachains_mode { + assert_eq!(candidates_iter.next(), None); + } + assert_eq!(required_ancestors.len(), 0); } - #[test] - fn can_succeed() { + #[rstest] + #[case(ProspectiveParachainsMode::Disabled)] + #[case(ProspectiveParachainsMode::Enabled {max_candidate_depth: 0, allowed_ancestry_len: 0})] + fn can_succeed(#[case] prospective_parachains_mode: ProspectiveParachainsMode) { test_harness( - |r| mock_overseer(r, Vec::new(), ProspectiveParachainsMode::Disabled), + |r| { + mock_overseer( + r, + Vec::new(), + Vec::new(), + HashMap::new(), + prospective_parachains_mode, + ) + }, |mut tx: TestSubsystemSender| async move { - let prospective_parachains_mode = ProspectiveParachainsMode::Disabled; select_candidates( &[], &[], @@ -411,13 +616,18 @@ mod select_candidates { ) } - // this tests that only the appropriate candidates get selected. - // To accomplish this, we supply a candidate list containing one candidate per possible core; - // the candidate selection algorithm must filter them to the appropriate set - #[test] - fn selects_correct_candidates() { - let mock_cores = mock_availability_cores(); - + // Test candidate selection when prospective parachains mode is disabled. + // This tests that only the appropriate candidates get selected when prospective parachains mode + // is disabled. To accomplish this, we supply a candidate list containing one candidate per + // possible core; the candidate selection algorithm must filter them to the appropriate set + #[rstest] + // why those particular indices? see the comments on mock_availability_cores_*() functions. 
+ #[case(mock_availability_cores_one_per_para(), vec![1, 4, 7, 8, 10])] + #[case(mock_availability_cores_multiple_per_para(), vec![1, 4, 7, 8, 10, 12, 14, 15])] + fn test_in_subsystem_selection( + #[case] mock_cores: Vec, + #[case] expected_candidates: Vec, + ) { let empty_hash = PersistedValidationData::::default().hash(); let mut descriptor_template = dummy_candidate_descriptor(dummy_hash()); @@ -453,9 +663,8 @@ mod select_candidates { }) .collect(); - // why those particular indices? see the comments on mock_availability_cores() let expected_candidates: Vec<_> = - [1, 4, 7, 8, 10].iter().map(|&idx| candidates[idx].clone()).collect(); + expected_candidates.into_iter().map(|idx| candidates[idx].clone()).collect(); let prospective_parachains_mode = ProspectiveParachainsMode::Disabled; let expected_backed = expected_candidates @@ -470,88 +679,19 @@ mod select_candidates { }) .collect(); + let mock_cores_clone = mock_cores.clone(); test_harness( - |r| mock_overseer(r, expected_backed, prospective_parachains_mode), - |mut tx: TestSubsystemSender| async move { - let result = select_candidates( - &mock_cores, - &[], - &candidates, + |r| { + mock_overseer( + r, + mock_cores_clone, + expected_backed, + HashMap::new(), prospective_parachains_mode, - Default::default(), - &mut tx, ) - .await - .unwrap(); - - result.into_iter().for_each(|c| { - assert!( - expected_candidates.iter().any(|c2| c.candidate.corresponds_to(c2)), - "Failed to find candidate: {:?}", - c, - ) - }); }, - ) - } - - #[test] - fn selects_max_one_code_upgrade() { - let mock_cores = mock_availability_cores(); - - let empty_hash = PersistedValidationData::::default().hash(); - - // why those particular indices? see the comments on mock_availability_cores() - // the first candidate with code is included out of [1, 4, 7, 8, 10]. - let cores = [1, 4, 7, 8, 10]; - let cores_with_code = [1, 4, 8]; - - let expected_cores = [1, 7, 10]; - - let committed_receipts: Vec<_> = (0..mock_cores.len()) - .map(|i| { - let mut descriptor = dummy_candidate_descriptor(dummy_hash()); - descriptor.para_id = i.into(); - descriptor.persisted_validation_data_hash = empty_hash; - CommittedCandidateReceipt { - descriptor, - commitments: CandidateCommitments { - new_validation_code: if cores_with_code.contains(&i) { - Some(vec![].into()) - } else { - None - }, - ..Default::default() - }, - } - }) - .collect(); - - // Input to select_candidates - let candidates: Vec<_> = committed_receipts.iter().map(|r| r.to_plain()).collect(); - // Build possible outputs from select_candidates - let backed_candidates: Vec<_> = committed_receipts - .iter() - .map(|committed_receipt| BackedCandidate { - candidate: committed_receipt.clone(), - validity_votes: Vec::new(), - validator_indices: default_bitvec(MOCK_GROUP_SIZE), - }) - .collect(); - - // First, provisioner will request backable candidates for each scheduled core. - // Then, some of them get filtered due to new validation code rule. 
- let expected_backed: Vec<_> = - cores.iter().map(|&idx| backed_candidates[idx].clone()).collect(); - let expected_backed_filtered: Vec<_> = - expected_cores.iter().map(|&idx| candidates[idx].clone()).collect(); - - let prospective_parachains_mode = ProspectiveParachainsMode::Disabled; - - test_harness( - |r| mock_overseer(r, expected_backed, prospective_parachains_mode), |mut tx: TestSubsystemSender| async move { - let result = select_candidates( + let result: Vec = select_candidates( &mock_cores, &[], &candidates, @@ -562,11 +702,9 @@ mod select_candidates { .await .unwrap(); - assert_eq!(result.len(), 3); - result.into_iter().for_each(|c| { assert!( - expected_backed_filtered.iter().any(|c2| c.candidate.corresponds_to(c2)), + expected_candidates.iter().any(|c2| c.candidate.corresponds_to(c2)), "Failed to find candidate: {:?}", c, ) @@ -575,9 +713,90 @@ mod select_candidates { ) } + // #[test] + // fn selects_max_one_code_upgrade() { + // let mock_cores = mock_availability_cores(); + + // let empty_hash = PersistedValidationData::::default().hash(); + + // // why those particular indices? see the comments on mock_availability_cores() + // // the first candidate with code is included out of [1, 4, 7, 8, 10]. + // let cores = [1, 4, 7, 8, 10]; + // let cores_with_code = [1, 4, 8]; + + // let expected_cores = [1, 7, 10]; + + // let committed_receipts: Vec<_> = (0..mock_cores.len()) + // .map(|i| { + // let mut descriptor = dummy_candidate_descriptor(dummy_hash()); + // descriptor.para_id = i.into(); + // descriptor.persisted_validation_data_hash = empty_hash; + // CommittedCandidateReceipt { + // descriptor, + // commitments: CandidateCommitments { + // new_validation_code: if cores_with_code.contains(&i) { + // Some(vec![].into()) + // } else { + // None + // }, + // ..Default::default() + // }, + // } + // }) + // .collect(); + + // // Input to select_candidates + // let candidates: Vec<_> = committed_receipts.iter().map(|r| r.to_plain()).collect(); + // // Build possible outputs from select_candidates + // let backed_candidates: Vec<_> = committed_receipts + // .iter() + // .map(|committed_receipt| BackedCandidate { + // candidate: committed_receipt.clone(), + // validity_votes: Vec::new(), + // validator_indices: default_bitvec(MOCK_GROUP_SIZE), + // }) + // .collect(); + + // // First, provisioner will request backable candidates for each scheduled core. + // // Then, some of them get filtered due to new validation code rule. 
+ // let expected_backed: Vec<_> = + // cores.iter().map(|&idx| backed_candidates[idx].clone()).collect(); + // let expected_backed_filtered: Vec<_> = + // expected_cores.iter().map(|&idx| candidates[idx].clone()).collect(); + + // let prospective_parachains_mode = ProspectiveParachainsMode::Disabled; + + // test_harness( + // |r| mock_overseer(r, expected_backed, prospective_parachains_mode), + // |mut tx: TestSubsystemSender| async move { + // let result = select_candidates( + // &mock_cores, + // &[], + // &candidates, + // prospective_parachains_mode, + // Default::default(), + // &mut tx, + // ) + // .await + // .unwrap(); + + // assert_eq!(result.len(), 3); + + // result.into_iter().for_each(|c| { + // assert!( + // expected_backed_filtered.iter().any(|c2| c.candidate.corresponds_to(c2)), + // "Failed to find candidate: {:?}", + // c, + // ) + // }); + // }, + // ) + // } + #[test] fn request_from_prospective_parachains() { - let mock_cores = mock_availability_cores(); + let mock_cores = mock_availability_cores_multiple_per_para(); + let empty_hash = PersistedValidationData::::default().hash(); let mut descriptor_template = dummy_candidate_descriptor(dummy_hash()); @@ -597,8 +816,10 @@ mod select_candidates { .collect(); // why those particular indices? see the comments on mock_availability_cores() - let expected_candidates: Vec<_> = - [1, 4, 7, 8, 10].iter().map(|&idx| candidates[idx].clone()).collect(); + let expected_candidates: Vec<_> = [1, 4, 7, 8, 10, 12, 12, 12, 12, 12, 13, 13, 14, 14, 15] + .iter() + .map(|&idx| candidates[idx].clone()) + .collect(); // Expect prospective parachains subsystem requests. let prospective_parachains_mode = ProspectiveParachainsMode::Enabled { max_candidate_depth: 0, allowed_ancestry_len: 0 }; @@ -615,74 +836,38 @@ mod select_candidates { }) .collect(); + let mut required_ancestors = HashMap::new(); + required_ancestors + .insert(vec![candidates[4].hash()], vec![CandidateHash(Hash::from_low_u64_be(41))]); + required_ancestors + .insert(vec![candidates[8].hash()], vec![CandidateHash(Hash::from_low_u64_be(81))]); + required_ancestors.insert( + [12, 12, 12].iter().map(|&idx| candidates[idx].hash()).collect::>(), + vec![ + CandidateHash(Hash::from_low_u64_be(121)), + CandidateHash(Hash::from_low_u64_be(122)), + CandidateHash(Hash::from_low_u64_be(121)), + ], + ); + required_ancestors.insert( + [13, 13].iter().map(|&idx| candidates[idx].hash()).collect::>(), + (131..=139).map(|num| CandidateHash(Hash::from_low_u64_be(num))).collect(), + ); + + required_ancestors + .insert(vec![candidates[15].hash()], vec![CandidateHash(Hash::from_low_u64_be(151))]); + + let mock_cores_clone = mock_cores.clone(); test_harness( - |r| mock_overseer(r, expected_backed, prospective_parachains_mode), - |mut tx: TestSubsystemSender| async move { - let result = select_candidates( - &mock_cores, - &[], - &[], + |r| { + mock_overseer( + r, + mock_cores_clone, + expected_backed, + required_ancestors, prospective_parachains_mode, - Default::default(), - &mut tx, ) - .await - .unwrap(); - - result.into_iter().for_each(|c| { - assert!( - expected_candidates.iter().any(|c2| c.candidate.corresponds_to(c2)), - "Failed to find candidate: {:?}", - c, - ) - }); }, - ) - } - - #[test] - fn request_receipts_based_on_relay_parent() { - let mock_cores = mock_availability_cores(); - let empty_hash = PersistedValidationData::::default().hash(); - - let mut descriptor_template = dummy_candidate_descriptor(dummy_hash()); - descriptor_template.persisted_validation_data_hash = empty_hash; - let 
candidate_template = CandidateReceipt { - descriptor: descriptor_template, - commitments_hash: CandidateCommitments::default().hash(), - }; - - let candidates: Vec<_> = std::iter::repeat(candidate_template) - .take(mock_cores.len()) - .enumerate() - .map(|(idx, mut candidate)| { - candidate.descriptor.para_id = idx.into(); - candidate.descriptor.relay_parent = Hash::repeat_byte(idx as u8); - candidate - }) - .collect(); - - // why those particular indices? see the comments on mock_availability_cores() - let expected_candidates: Vec<_> = - [1, 4, 7, 8, 10].iter().map(|&idx| candidates[idx].clone()).collect(); - // Expect prospective parachains subsystem requests. - let prospective_parachains_mode = - ProspectiveParachainsMode::Enabled { max_candidate_depth: 0, allowed_ancestry_len: 0 }; - - let expected_backed = expected_candidates - .iter() - .map(|c| BackedCandidate { - candidate: CommittedCandidateReceipt { - descriptor: c.descriptor.clone(), - commitments: Default::default(), - }, - validity_votes: Vec::new(), - validator_indices: default_bitvec(MOCK_GROUP_SIZE), - }) - .collect(); - - test_harness( - |r| mock_overseer(r, expected_backed, prospective_parachains_mode), |mut tx: TestSubsystemSender| async move { let result = select_candidates( &mock_cores, @@ -695,6 +880,7 @@ mod select_candidates { .await .unwrap(); + assert_eq!(result.len(), expected_candidates.len()); result.into_iter().for_each(|c| { assert!( expected_candidates.iter().any(|c2| c.candidate.corresponds_to(c2)), @@ -705,4 +891,70 @@ mod select_candidates { }, ) } + + // #[test] + // fn request_receipts_based_on_relay_parent() { + // let mock_cores = mock_availability_cores(); + // let empty_hash = PersistedValidationData::::default().hash(); + + // let mut descriptor_template = dummy_candidate_descriptor(dummy_hash()); + // descriptor_template.persisted_validation_data_hash = empty_hash; + // let candidate_template = CandidateReceipt { + // descriptor: descriptor_template, + // commitments_hash: CandidateCommitments::default().hash(), + // }; + + // let candidates: Vec<_> = std::iter::repeat(candidate_template) + // .take(mock_cores.len()) + // .enumerate() + // .map(|(idx, mut candidate)| { + // candidate.descriptor.para_id = idx.into(); + // candidate.descriptor.relay_parent = Hash::repeat_byte(idx as u8); + // candidate + // }) + // .collect(); + + // // why those particular indices? see the comments on mock_availability_cores() + // let expected_candidates: Vec<_> = + // [1, 4, 7, 8, 10].iter().map(|&idx| candidates[idx].clone()).collect(); + // // Expect prospective parachains subsystem requests. 
+ // let prospective_parachains_mode = + // ProspectiveParachainsMode::Enabled { max_candidate_depth: 0, allowed_ancestry_len: 0 }; + + // let expected_backed = expected_candidates + // .iter() + // .map(|c| BackedCandidate { + // candidate: CommittedCandidateReceipt { + // descriptor: c.descriptor.clone(), + // commitments: Default::default(), + // }, + // validity_votes: Vec::new(), + // validator_indices: default_bitvec(MOCK_GROUP_SIZE), + // }) + // .collect(); + + // test_harness( + // |r| mock_overseer(r, expected_backed, prospective_parachains_mode), + // |mut tx: TestSubsystemSender| async move { + // let result = select_candidates( + // &mock_cores, + // &[], + // &[], + // prospective_parachains_mode, + // Default::default(), + // &mut tx, + // ) + // .await + // .unwrap(); + + // result.into_iter().for_each(|c| { + // assert!( + // expected_candidates.iter().any(|c2| c.candidate.corresponds_to(c2)), + // "Failed to find candidate: {:?}", + // c, + // ) + // }); + // }, + // ) + // } } diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs index 549e43a671d6b..53363607b4ff5 100644 --- a/polkadot/node/subsystem-types/src/messages.rs +++ b/polkadot/node/subsystem-types/src/messages.rs @@ -1130,6 +1130,7 @@ pub enum ProspectiveParachainsMessage { CandidateBacked(ParaId, CandidateHash), /// Get N backable candidate hashes along with their relay parents for the given parachain, /// under the given relay-parent hash, which is a descendant of the given candidate hashes. + /// The candidate hashes need not be ordered. The subsystem will take care of that. /// N should represent the number of scheduled cores of this ParaId. /// Returns `None` on the channel if no such candidate exists. GetBackableCandidates(
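// Illustrative sketch, not part of the patch: a simplified, self-contained model of how the
// reworked provisioner plans one `GetBackableCandidates` request per para instead of one per
// core. `ParaId`, `CandidateHash` and the `CoreState` enum below are stand-ins invented for
// this example; the real code also consults availability bitfields and the `next_up_on_*`
// fields, which are reduced here to plain booleans.
use std::collections::{BTreeMap, HashMap, HashSet};

type ParaId = u32;
type CandidateHash = u64;

enum CoreState {
	Free,
	Scheduled(ParaId),
	Occupied { para: ParaId, candidate: CandidateHash, available: bool, timed_out: bool },
}

// For every para: how many backable candidates to request and the (unordered) required
// ancestors to pass along with the request.
fn plan_requests(cores: &[CoreState]) -> BTreeMap<ParaId, (u32, Vec<CandidateHash>)> {
	let mut ancestors: HashMap<ParaId, Vec<CandidateHash>> = HashMap::new();
	let mut timed_out: HashSet<ParaId> = HashSet::new();

	// First pass: collect pending candidates per para and note which paras have a timeout.
	for core in cores {
		if let CoreState::Occupied { para, candidate, timed_out: t, .. } = core {
			ancestors.entry(*para).or_default().push(*candidate);
			if *t {
				timed_out.insert(*para);
			}
		}
	}

	// Second pass: a core frees up if it is scheduled, its candidate became available, or its
	// para has any timed-out candidate (one timeout abandons the whole pending chain).
	let mut counts: BTreeMap<ParaId, u32> = BTreeMap::new();
	for core in cores {
		let para = match core {
			CoreState::Scheduled(para) => *para,
			CoreState::Occupied { para, available, .. }
				if *available || timed_out.contains(para) => *para,
			_ => continue,
		};
		*counts.entry(para).or_insert(0) += 1;
	}

	counts
		.into_iter()
		.map(|(para, count)| {
			// After a timeout we start over from the tree root, so no ancestors are required.
			let required = if timed_out.contains(&para) {
				Vec::new()
			} else {
				ancestors.remove(&para).unwrap_or_default()
			};
			(para, (count, required))
		})
		.collect()
}

fn main() {
	let cores = vec![
		CoreState::Scheduled(1),
		CoreState::Occupied { para: 2, candidate: 21, available: true, timed_out: false },
		CoreState::Occupied { para: 2, candidate: 22, available: true, timed_out: false },
		CoreState::Free,
	];
	let plan = plan_requests(&cores);
	// Para 1 gets one fresh candidate; para 2 gets a single request for two candidates with
	// its two pending candidates as (unordered) required ancestors.
	assert_eq!(plan.get(&1), Some(&(1, Vec::new())));
	assert_eq!(plan.get(&2), Some(&(2, vec![21, 22])));
}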