diff --git a/Cargo.lock b/Cargo.lock
index a6c2bc1fd318..02e7d0d24bda 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -14299,6 +14299,7 @@ dependencies = [
"polkadot-primitives",
"polkadot-primitives-test-helpers",
"rstest",
+ "schnellru",
"sp-core 28.0.0",
"sp-keyring",
"sp-maybe-compressed-blob 11.0.0",
@@ -15138,6 +15139,7 @@ dependencies = [
"sp-runtime 31.0.1",
"sp-staking",
"sp-std 14.0.0",
+ "thiserror",
]
[[package]]
diff --git a/polkadot/node/collation-generation/Cargo.toml b/polkadot/node/collation-generation/Cargo.toml
index 855b6b0e86eb..777458673f5b 100644
--- a/polkadot/node/collation-generation/Cargo.toml
+++ b/polkadot/node/collation-generation/Cargo.toml
@@ -21,6 +21,7 @@ sp-core = { workspace = true, default-features = true }
sp-maybe-compressed-blob = { workspace = true, default-features = true }
thiserror = { workspace = true }
codec = { features = ["bit-vec", "derive"], workspace = true }
+schnellru = { workspace = true }
[dev-dependencies]
polkadot-node-subsystem-test-helpers = { workspace = true }
diff --git a/polkadot/node/collation-generation/src/error.rs b/polkadot/node/collation-generation/src/error.rs
index f04e3c4f20b4..68902f58579a 100644
--- a/polkadot/node/collation-generation/src/error.rs
+++ b/polkadot/node/collation-generation/src/error.rs
@@ -14,6 +14,7 @@
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+use polkadot_primitives::vstaging::CandidateReceiptError;
use thiserror::Error;
#[derive(Debug, Error)]
@@ -30,8 +31,12 @@ pub enum Error {
UtilRuntime(#[from] polkadot_node_subsystem_util::runtime::Error),
#[error(transparent)]
Erasure(#[from] polkadot_erasure_coding::Error),
- #[error("Parachain backing state not available in runtime.")]
- MissingParaBackingState,
+ #[error("Collation submitted before initialization")]
+ SubmittedBeforeInit,
+ #[error("V2 core index check failed: {0}")]
+ CandidateReceiptCheck(CandidateReceiptError),
+ #[error("PoV size {0} exceeded maximum size of {1}")]
+ POVSizeExceeded(usize, usize),
}
pub type Result<T> = std::result::Result<T, Error>;
diff --git a/polkadot/node/collation-generation/src/lib.rs b/polkadot/node/collation-generation/src/lib.rs
index f04f69cbd380..9e975acf10b8 100644
--- a/polkadot/node/collation-generation/src/lib.rs
+++ b/polkadot/node/collation-generation/src/lib.rs
@@ -32,27 +32,34 @@
#![deny(missing_docs)]
use codec::Encode;
-use futures::{channel::oneshot, future::FutureExt, join, select};
+use error::{Error, Result};
+use futures::{channel::oneshot, future::FutureExt, select};
use polkadot_node_primitives::{
AvailableData, Collation, CollationGenerationConfig, CollationSecondedSignal, PoV,
SubmitCollationParams,
};
use polkadot_node_subsystem::{
- messages::{CollationGenerationMessage, CollatorProtocolMessage},
- overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, RuntimeApiError, SpawnedSubsystem,
- SubsystemContext, SubsystemError, SubsystemResult,
+ messages::{CollationGenerationMessage, CollatorProtocolMessage, RuntimeApiMessage},
+ overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem,
+ SubsystemContext, SubsystemError, SubsystemResult, SubsystemSender,
};
use polkadot_node_subsystem_util::{
- request_async_backing_params, request_availability_cores, request_para_backing_state,
- request_persisted_validation_data, request_validation_code, request_validation_code_hash,
- request_validators, runtime::fetch_claim_queue,
+ request_claim_queue, request_persisted_validation_data, request_session_index_for_child,
+ request_validation_code_hash, request_validators,
+ runtime::{request_node_features, ClaimQueueSnapshot},
};
use polkadot_primitives::{
collator_signature_payload,
- vstaging::{CandidateReceiptV2 as CandidateReceipt, CoreState},
+ node_features::FeatureIndex,
+ vstaging::{
+ transpose_claim_queue, CandidateDescriptorV2, CandidateReceiptV2 as CandidateReceipt,
+ CommittedCandidateReceiptV2, TransposedClaimQueue,
+ },
CandidateCommitments, CandidateDescriptor, CollatorPair, CoreIndex, Hash, Id as ParaId,
- OccupiedCoreAssumption, PersistedValidationData, ScheduledCore, ValidationCodeHash,
+ NodeFeatures, OccupiedCoreAssumption, PersistedValidationData, SessionIndex,
+ ValidationCodeHash,
};
+use schnellru::{ByLength, LruMap};
use sp_core::crypto::Pair;
use std::sync::Arc;
@@ -69,6 +76,7 @@ const LOG_TARGET: &'static str = "parachain::collation-generation";
/// Collation Generation Subsystem
pub struct CollationGenerationSubsystem {
	config: Option<Arc<CollationGenerationConfig>>,
+ session_info_cache: SessionInfoCache,
metrics: Metrics,
}
@@ -76,7 +84,7 @@ pub struct CollationGenerationSubsystem {
impl CollationGenerationSubsystem {
/// Create a new instance of the `CollationGenerationSubsystem`.
pub fn new(metrics: Metrics) -> Self {
- Self { config: None, metrics }
+ Self { config: None, metrics, session_info_cache: SessionInfoCache::new() }
}
/// Run this subsystem
@@ -117,19 +125,8 @@ impl CollationGenerationSubsystem {
activated,
..
}))) => {
- // follow the procedure from the guide
- if let Some(config) = &self.config {
- let metrics = self.metrics.clone();
- if let Err(err) = handle_new_activations(
- config.clone(),
- activated.into_iter().map(|v| v.hash),
- ctx,
- metrics,
- )
- .await
- {
- gum::warn!(target: LOG_TARGET, err = ?err, "failed to handle new activations");
- }
+ if let Err(err) = self.handle_new_activation(activated.map(|v| v.hash), ctx).await {
+ gum::warn!(target: LOG_TARGET, err = ?err, "failed to handle new activation");
}
false
@@ -154,14 +151,8 @@ impl CollationGenerationSubsystem {
Ok(FromOrchestra::Communication {
msg: CollationGenerationMessage::SubmitCollation(params),
}) => {
- if let Some(config) = &self.config {
- if let Err(err) =
- handle_submit_collation(params, config, ctx, &self.metrics).await
- {
- gum::error!(target: LOG_TARGET, ?err, "Failed to submit collation");
- }
- } else {
- gum::error!(target: LOG_TARGET, "Collation submitted before initialization");
+ if let Err(err) = self.handle_submit_collation(params, ctx).await {
+ gum::error!(target: LOG_TARGET, ?err, "Failed to submit collation");
}
false
@@ -178,175 +169,132 @@ impl CollationGenerationSubsystem {
},
}
}
-}
-
-#[overseer::subsystem(CollationGeneration, error=SubsystemError, prefix=self::overseer)]
-impl CollationGenerationSubsystem {
- fn start(self, ctx: Context) -> SpawnedSubsystem {
- let future = async move {
- self.run(ctx).await;
- Ok(())
- }
- .boxed();
-
- SpawnedSubsystem { name: "collation-generation-subsystem", future }
- }
-}
-#[overseer::contextbounds(CollationGeneration, prefix = self::overseer)]
-async fn handle_new_activations(
- config: Arc<CollationGenerationConfig>,
- activated: impl IntoIterator<Item = Hash>,
- ctx: &mut Context,
- metrics: Metrics,
-) -> crate::error::Result<()> {
- // follow the procedure from the guide:
- // https://paritytech.github.io/polkadot-sdk/book/node/collators/collation-generation.html
-
- // If there is no collation function provided, bail out early.
- // Important: Lookahead collator and slot based collator do not use `CollatorFn`.
- if config.collator.is_none() {
- return Ok(())
- }
-
- let para_id = config.para_id;
-
- let _overall_timer = metrics.time_new_activations();
-
- for relay_parent in activated {
- let _relay_parent_timer = metrics.time_new_activations_relay_parent();
-
- let (availability_cores, validators, async_backing_params) = join!(
- request_availability_cores(relay_parent, ctx.sender()).await,
- request_validators(relay_parent, ctx.sender()).await,
- request_async_backing_params(relay_parent, ctx.sender()).await,
- );
-
- let availability_cores = availability_cores??;
- let async_backing_params = async_backing_params?.ok();
- let n_validators = validators??.len();
- let maybe_claim_queue = fetch_claim_queue(ctx.sender(), relay_parent)
- .await
- .map_err(crate::error::Error::UtilRuntime)?;
-
- // The loop bellow will fill in cores that the para is allowed to build on.
- let mut cores_to_build_on = Vec::new();
-
- // This assumption refers to all cores of the parachain, taking elastic scaling
- // into account.
- let mut para_assumption = None;
- for (core_idx, core) in availability_cores.into_iter().enumerate() {
- // This nested assumption refers only to the core being iterated.
- let (core_assumption, scheduled_core) = match core {
- CoreState::Scheduled(scheduled_core) =>
- (OccupiedCoreAssumption::Free, scheduled_core),
- CoreState::Occupied(occupied_core) => match async_backing_params {
- Some(params) if params.max_candidate_depth >= 1 => {
- // maximum candidate depth when building on top of a block
- // pending availability is necessarily 1 - the depth of the
- // pending block is 0 so the child has depth 1.
-
- // Use claim queue if available, or fallback to `next_up_on_available`
- let res = match maybe_claim_queue {
- Some(ref claim_queue) => {
- // read what's in the claim queue for this core at depth 0.
- claim_queue
- .get_claim_for(CoreIndex(core_idx as u32), 0)
- .map(|para_id| ScheduledCore { para_id, collator: None })
- },
- None => {
- // Runtime doesn't support claim queue runtime api. Fallback to
- // `next_up_on_available`
- occupied_core.next_up_on_available
- },
- };
+ async fn handle_submit_collation(
+ &mut self,
+ params: SubmitCollationParams,
+ ctx: &mut Context,
+ ) -> Result<()> {
+ let Some(config) = &self.config else {
+ return Err(Error::SubmittedBeforeInit);
+ };
+ let _timer = self.metrics.time_submit_collation();
- match res {
- Some(res) => (OccupiedCoreAssumption::Included, res),
- None => continue,
- }
- },
- _ => {
- gum::trace!(
- target: LOG_TARGET,
- core_idx = %core_idx,
- relay_parent = ?relay_parent,
- "core is occupied. Keep going.",
- );
- continue
- },
- },
- CoreState::Free => {
- gum::trace!(
- target: LOG_TARGET,
- core_idx = %core_idx,
- "core is not assigned to any para. Keep going.",
- );
- continue
- },
- };
+ let SubmitCollationParams {
+ relay_parent,
+ collation,
+ parent_head,
+ validation_code_hash,
+ result_sender,
+ core_index,
+ } = params;
- if scheduled_core.para_id != config.para_id {
- gum::trace!(
+ let mut validation_data = match request_persisted_validation_data(
+ relay_parent,
+ config.para_id,
+ OccupiedCoreAssumption::TimedOut,
+ ctx.sender(),
+ )
+ .await
+ .await??
+ {
+ Some(v) => v,
+ None => {
+ gum::debug!(
target: LOG_TARGET,
- core_idx = %core_idx,
relay_parent = ?relay_parent,
our_para = %config.para_id,
- their_para = %scheduled_core.para_id,
- "core is not assigned to our para. Keep going.",
+ "No validation data for para - does it exist at this relay-parent?",
);
- } else {
- // This does not work for elastic scaling, but it should be enough for single
- // core parachains. If async backing runtime is available we later override
- // the assumption based on the `para_backing_state` API response.
- para_assumption = Some(core_assumption);
- // Accumulate cores for building collation(s) outside the loop.
- cores_to_build_on.push(CoreIndex(core_idx as u32));
- }
- }
+ return Ok(())
+ },
+ };
- // Skip to next relay parent if there is no core assigned to us.
- if cores_to_build_on.is_empty() {
- continue
+ // We need to swap the parent-head data, but all other fields here will be correct.
+ validation_data.parent_head = parent_head;
+
+ let claim_queue = request_claim_queue(relay_parent, ctx.sender()).await.await??;
+
+ let session_index =
+ request_session_index_for_child(relay_parent, ctx.sender()).await.await??;
+
+ let session_info =
+ self.session_info_cache.get(relay_parent, session_index, ctx.sender()).await?;
+ let collation = PreparedCollation {
+ collation,
+ relay_parent,
+ para_id: config.para_id,
+ validation_data,
+ validation_code_hash,
+ n_validators: session_info.n_validators,
+ core_index,
+ session_index,
+ };
+
+ construct_and_distribute_receipt(
+ collation,
+ config.key.clone(),
+ ctx.sender(),
+ result_sender,
+ &mut self.metrics,
+ session_info.v2_receipts,
+ &transpose_claim_queue(claim_queue),
+ )
+ .await?;
+
+ Ok(())
+ }
+
+ async fn handle_new_activation(
+ &mut self,
+ maybe_activated: Option<Hash>,
+ ctx: &mut Context,
+ ) -> Result<()> {
+ let Some(config) = &self.config else {
+ return Ok(());
+ };
+
+ let Some(relay_parent) = maybe_activated else { return Ok(()) };
+
+ // If there is no collation function provided, bail out early.
+ // Important: Lookahead collator and slot based collator do not use `CollatorFn`.
+ if config.collator.is_none() {
+ return Ok(())
}
- // If at least one core is assigned to us, `para_assumption` is `Some`.
- let Some(mut para_assumption) = para_assumption else { continue };
-
- // If it is none it means that neither async backing or elastic scaling (which
- // depends on it) are supported. We'll use the `para_assumption` we got from
- // iterating cores.
- if async_backing_params.is_some() {
- // We are being very optimistic here, but one of the cores could pend availability some
- // more block, ore even time out.
- // For timeout assumption the collator can't really know because it doesn't receive
- // bitfield gossip.
- let para_backing_state =
- request_para_backing_state(relay_parent, config.para_id, ctx.sender())
- .await
- .await??
- .ok_or(crate::error::Error::MissingParaBackingState)?;
-
- // Override the assumption about the para's assigned cores.
- para_assumption = if para_backing_state.pending_availability.is_empty() {
- OccupiedCoreAssumption::Free
- } else {
- OccupiedCoreAssumption::Included
- }
+ let para_id = config.para_id;
+
+ let _timer = self.metrics.time_new_activation();
+
+ let session_index =
+ request_session_index_for_child(relay_parent, ctx.sender()).await.await??;
+
+ let session_info =
+ self.session_info_cache.get(relay_parent, session_index, ctx.sender()).await?;
+ let n_validators = session_info.n_validators;
+
+ let claim_queue =
+ ClaimQueueSnapshot::from(request_claim_queue(relay_parent, ctx.sender()).await.await??);
+
+ let cores_to_build_on = claim_queue
+ .iter_claims_at_depth(0)
+ .filter_map(|(core_idx, para_id)| (para_id == config.para_id).then_some(core_idx))
+ .collect::<Vec<_>>();
+
+ // Nothing to do if no core assigned to us.
+ if cores_to_build_on.is_empty() {
+ return Ok(())
}
- gum::debug!(
- target: LOG_TARGET,
- relay_parent = ?relay_parent,
- our_para = %para_id,
- ?para_assumption,
- "Occupied core(s) assumption",
- );
+ // We are being very optimistic here, but one of the cores could be pending availability
+ // for some more blocks, or even time out. We assume all cores are being freed.
let mut validation_data = match request_persisted_validation_data(
relay_parent,
para_id,
- para_assumption,
+ // Just use included assumption always. If there are no pending candidates it's a
+ // no-op.
+ OccupiedCoreAssumption::Included,
ctx.sender(),
)
.await
@@ -360,17 +308,20 @@ async fn handle_new_activations(
our_para = %para_id,
"validation data is not available",
);
- continue
+ return Ok(())
},
};
- let validation_code_hash = match obtain_validation_code_hash_with_assumption(
+ let validation_code_hash = match request_validation_code_hash(
relay_parent,
para_id,
- para_assumption,
+ // Just use included assumption always. If there are no pending candidates it's a
+ // no-op.
+ OccupiedCoreAssumption::Included,
ctx.sender(),
)
- .await?
+ .await
+ .await??
{
Some(v) => v,
None => {
@@ -380,17 +331,19 @@ async fn handle_new_activations(
our_para = %para_id,
"validation code hash is not found.",
);
- continue
+ return Ok(())
},
};
let task_config = config.clone();
- let metrics = metrics.clone();
+ let metrics = self.metrics.clone();
let mut task_sender = ctx.sender().clone();
ctx.spawn(
"chained-collation-builder",
Box::pin(async move {
+ let transposed_claim_queue = transpose_claim_queue(claim_queue.0);
+
for core_index in cores_to_build_on {
let collator_fn = match task_config.collator.as_ref() {
Some(x) => x,
@@ -411,7 +364,7 @@ async fn handle_new_activations(
};
let parent_head = collation.head_data.clone();
- construct_and_distribute_receipt(
+ if let Err(err) = construct_and_distribute_receipt(
PreparedCollation {
collation,
para_id,
@@ -420,13 +373,24 @@ async fn handle_new_activations(
validation_code_hash,
n_validators,
core_index,
+ session_index,
},
task_config.key.clone(),
&mut task_sender,
result_sender,
&metrics,
+ session_info.v2_receipts,
+ &transposed_claim_queue,
)
- .await;
+ .await
+ {
+ gum::error!(
+ target: LOG_TARGET,
+ "Failed to construct and distribute collation: {}",
+ err
+ );
+ return
+ }
// Chain the collations. All else stays the same as we build the chained
// collation on same relay parent.
@@ -434,76 +398,64 @@ async fn handle_new_activations(
}
}),
)?;
- }
- Ok(())
+ Ok(())
+ }
}
-#[overseer::contextbounds(CollationGeneration, prefix = self::overseer)]
-async fn handle_submit_collation(
- params: SubmitCollationParams,
- config: &CollationGenerationConfig,
- ctx: &mut Context,
- metrics: &Metrics,
-) -> crate::error::Result<()> {
- let _timer = metrics.time_submit_collation();
+#[overseer::subsystem(CollationGeneration, error=SubsystemError, prefix=self::overseer)]
+impl CollationGenerationSubsystem {
+ fn start(self, ctx: Context) -> SpawnedSubsystem {
+ let future = async move {
+ self.run(ctx).await;
+ Ok(())
+ }
+ .boxed();
- let SubmitCollationParams {
- relay_parent,
- collation,
- parent_head,
- validation_code_hash,
- result_sender,
- core_index,
- } = params;
+ SpawnedSubsystem { name: "collation-generation-subsystem", future }
+ }
+}
- let validators = request_validators(relay_parent, ctx.sender()).await.await??;
- let n_validators = validators.len();
+#[derive(Clone)]
+struct PerSessionInfo {
+ v2_receipts: bool,
+ n_validators: usize,
+}
- // We need to swap the parent-head data, but all other fields here will be correct.
- let mut validation_data = match request_persisted_validation_data(
- relay_parent,
- config.para_id,
- OccupiedCoreAssumption::TimedOut,
- ctx.sender(),
- )
- .await
- .await??
- {
- Some(v) => v,
- None => {
- gum::debug!(
- target: LOG_TARGET,
- relay_parent = ?relay_parent,
- our_para = %config.para_id,
- "No validation data for para - does it exist at this relay-parent?",
- );
- return Ok(())
- },
- };
+struct SessionInfoCache(LruMap<SessionIndex, PerSessionInfo>);
- validation_data.parent_head = parent_head;
+impl SessionInfoCache {
+ fn new() -> Self {
+ Self(LruMap::new(ByLength::new(2)))
+ }
- let collation = PreparedCollation {
- collation,
- relay_parent,
- para_id: config.para_id,
- validation_data,
- validation_code_hash,
- n_validators,
- core_index,
- };
+ async fn get<Sender: SubsystemSender<RuntimeApiMessage>>(
+ &mut self,
+ relay_parent: Hash,
+ session_index: SessionIndex,
+ sender: &mut Sender,
+ ) -> Result<PerSessionInfo> {
+ if let Some(info) = self.0.get(&session_index) {
+ return Ok(info.clone())
+ }
- construct_and_distribute_receipt(
- collation,
- config.key.clone(),
- ctx.sender(),
- result_sender,
- metrics,
- )
- .await;
+ let n_validators =
+ request_validators(relay_parent, &mut sender.clone()).await.await??.len();
- Ok(())
+ let node_features = request_node_features(relay_parent, session_index, sender)
+ .await?
+ .unwrap_or(NodeFeatures::EMPTY);
+
+ let info = PerSessionInfo {
+ v2_receipts: node_features
+ .get(FeatureIndex::CandidateReceiptV2 as usize)
+ .map(|b| *b)
+ .unwrap_or(false),
+ n_validators,
+ };
+ self.0.insert(session_index, info);
+ Ok(self.0.get(&session_index).expect("Just inserted").clone())
+ }
}
struct PreparedCollation {
@@ -514,6 +466,7 @@ struct PreparedCollation {
validation_code_hash: ValidationCodeHash,
n_validators: usize,
core_index: CoreIndex,
+ session_index: SessionIndex,
}
/// Takes a prepared collation, along with its context, and produces a candidate receipt
@@ -524,7 +477,9 @@ async fn construct_and_distribute_receipt(
sender: &mut impl overseer::CollationGenerationSenderTrait,
	result_sender: Option<oneshot::Sender<CollationSecondedSignal>>,
metrics: &Metrics,
-) {
+ v2_receipts: bool,
+ transposed_claim_queue: &TransposedClaimQueue,
+) -> Result<()> {
let PreparedCollation {
collation,
para_id,
@@ -533,6 +488,7 @@ async fn construct_and_distribute_receipt(
validation_code_hash,
n_validators,
core_index,
+ session_index,
} = collation;
let persisted_validation_data_hash = validation_data.hash();
@@ -550,15 +506,7 @@ async fn construct_and_distribute_receipt(
// As such, honest collators never produce an uncompressed PoV which starts with
// a compression magic number, which would lead validators to reject the collation.
if encoded_size > validation_data.max_pov_size as usize {
- gum::debug!(
- target: LOG_TARGET,
- para_id = %para_id,
- size = encoded_size,
- max_size = validation_data.max_pov_size,
- "PoV exceeded maximum size"
- );
-
- return
+ return Err(Error::POVSizeExceeded(encoded_size, validation_data.max_pov_size as usize))
}
pov
@@ -574,18 +522,7 @@ async fn construct_and_distribute_receipt(
&validation_code_hash,
);
- let erasure_root = match erasure_root(n_validators, validation_data, pov.clone()) {
- Ok(erasure_root) => erasure_root,
- Err(err) => {
- gum::error!(
- target: LOG_TARGET,
- para_id = %para_id,
- err = ?err,
- "failed to calculate erasure root",
- );
- return
- },
- };
+ let erasure_root = erasure_root(n_validators, validation_data, pov.clone())?;
let commitments = CandidateCommitments {
upward_messages: collation.upward_messages,
@@ -596,35 +533,67 @@ async fn construct_and_distribute_receipt(
hrmp_watermark: collation.hrmp_watermark,
};
- let ccr = CandidateReceipt {
- commitments_hash: commitments.hash(),
- descriptor: CandidateDescriptor {
- signature: key.sign(&signature_payload),
- para_id,
- relay_parent,
- collator: key.public(),
- persisted_validation_data_hash,
- pov_hash,
- erasure_root,
- para_head: commitments.head_data.hash(),
- validation_code_hash,
+ let receipt = if v2_receipts {
+ let ccr = CommittedCandidateReceiptV2 {
+ descriptor: CandidateDescriptorV2::new(
+ para_id,
+ relay_parent,
+ core_index,
+ session_index,
+ persisted_validation_data_hash,
+ pov_hash,
+ erasure_root,
+ commitments.head_data.hash(),
+ validation_code_hash,
+ ),
+ commitments,
+ };
+
+ ccr.check_core_index(&transposed_claim_queue)
+ .map_err(Error::CandidateReceiptCheck)?;
+
+ ccr.to_plain()
+ } else {
+ if commitments.selected_core().is_some() {
+ gum::warn!(
+ target: LOG_TARGET,
+ ?pov_hash,
+ ?relay_parent,
+ para_id = %para_id,
+ "Candidate commitments contain UMP signal without v2 receipts being enabled.",
+ );
+ }
+ CandidateReceipt {
+ commitments_hash: commitments.hash(),
+ descriptor: CandidateDescriptor {
+ signature: key.sign(&signature_payload),
+ para_id,
+ relay_parent,
+ collator: key.public(),
+ persisted_validation_data_hash,
+ pov_hash,
+ erasure_root,
+ para_head: commitments.head_data.hash(),
+ validation_code_hash,
+ }
+ .into(),
}
- .into(),
};
gum::debug!(
target: LOG_TARGET,
- candidate_hash = ?ccr.hash(),
+ candidate_hash = ?receipt.hash(),
?pov_hash,
?relay_parent,
para_id = %para_id,
+ ?core_index,
"candidate is generated",
);
metrics.on_collation_generated();
sender
.send_message(CollatorProtocolMessage::DistributeCollation {
- candidate_receipt: ccr,
+ candidate_receipt: receipt,
parent_head_data_hash,
pov,
parent_head_data,
@@ -632,40 +601,15 @@ async fn construct_and_distribute_receipt(
core_index,
})
.await;
-}
-async fn obtain_validation_code_hash_with_assumption(
- relay_parent: Hash,
- para_id: ParaId,
- assumption: OccupiedCoreAssumption,
- sender: &mut impl overseer::CollationGenerationSenderTrait,
-) -> crate::error::Result