diff --git a/Cargo.lock b/Cargo.lock
index 0a29b20c02de4..07505cb420516 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4969,6 +4969,7 @@ dependencies = [
"cumulus-test-client",
"cumulus-test-relay-sproof-builder 0.7.0",
"cumulus-test-runtime",
+ "docify",
"environmental",
"frame-benchmarking 28.0.0",
"frame-support 28.0.0",
diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml
index 6b6bc4fbcefe5..31271b883fadb 100644
--- a/cumulus/pallets/parachain-system/Cargo.toml
+++ b/cumulus/pallets/parachain-system/Cargo.toml
@@ -14,6 +14,7 @@ workspace = true
[dependencies]
bytes = { workspace = true }
codec = { features = ["derive"], workspace = true }
+docify = { workspace = true }
environmental = { workspace = true }
impl-trait-for-tuples = { workspace = true }
log = { workspace = true }
diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs
index 0a4a29539b2cc..2888c6aa7945f 100644
--- a/cumulus/pallets/parachain-system/src/lib.rs
+++ b/cumulus/pallets/parachain-system/src/lib.rs
@@ -192,6 +192,7 @@ pub mod ump_constants {
}
/// Trait for selecting the next core to build the candidate for.
+#[docify::export]
pub trait SelectCore {
/// Core selector information for the current block.
fn selected_core() -> (CoreSelector, ClaimQueueOffset);
diff --git a/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs b/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs
index 0d526b09834e9..9742d1d963f88 100644
--- a/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs
+++ b/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs
@@ -35,13 +35,10 @@ use cumulus_client_collator::service::{
};
#[docify::export(slot_based_colator_import)]
use cumulus_client_consensus_aura::collators::slot_based::{
- self as slot_based, Params as SlotBasedParams,
+ self as slot_based, Params as SlotBasedParams, SlotBasedBlockImport, SlotBasedBlockImportHandle,
};
use cumulus_client_consensus_aura::{
- collators::{
- lookahead::{self as aura, Params as AuraParams},
- slot_based::{SlotBasedBlockImport, SlotBasedBlockImportHandle},
- },
+ collators::lookahead::{self as aura, Params as AuraParams},
equivocation_import_queue::Verifier as EquivocationVerifier,
};
use cumulus_client_consensus_proposer::{Proposer, ProposerInterface};
diff --git a/docs/sdk/src/guides/enable_elastic_scaling.rs b/docs/sdk/src/guides/enable_elastic_scaling.rs
new file mode 100644
index 0000000000000..f279e254a530a
--- /dev/null
+++ b/docs/sdk/src/guides/enable_elastic_scaling.rs
@@ -0,0 +1,249 @@
+//! # Enable elastic scaling for a parachain
+//!
+//!
+//! This guide assumes full familiarity with Asynchronous Backing and its
+//! terminology, as defined in the Polkadot Wiki.
+//! Furthermore, the parachain should have already been upgraded according to the guide.
+//!
+//! ## Quick introduction to elastic scaling
+//!
+//! [Elastic scaling](https://polkadot.network/blog/elastic-scaling-streamling-growth-on-polkadot)
+//! is a feature that enables parachains to seamlessly scale up/down the number of used cores.
+//! This can be used to increase the available compute and bandwidth resources of a parachain or
+//! to lower the transaction inclusion latency by decreasing block time.
+//!
+//! ## Performance characteristics and constraints
+//!
+//! Elastic scaling is still considered experimental software, so stability is not guaranteed.
+//! If you encounter any problems,
+//! [please open an issue](https://github.com/paritytech/polkadot-sdk/issues).
+//! The constraints and performance characteristics of the current implementation are described below:
+//!
+//! 1. **Bounded compute throughput**. Each parachain block gets at most 2 seconds of execution on
+//! the relay chain. Therefore, assuming the full 2 seconds are used, a parachain can only
+//! utilise at most 3 cores in a relay chain slot of 6 seconds. If the full execution time is not
+//! being used, or if all collators are able to author blocks faster than on the reference hardware,
+//! higher core counts can be achieved.
+//! 2. **Sequential block execution**. Each collator must import the previous block before authoring
+//! a new one. At present, this happens sequentially, which limits the maximum compute throughput
+//! when using multiple collators. To briefly explain the reasoning: the previous collator spends
+//! 2 seconds building the block and then announces it. The next collator fetches and executes it,
+//! spending 2 seconds plus the block fetching duration out of its own slot. It therefore cannot
+//! build a subsequent block in time and ends up authoring a fork, which defeats the purpose of
+//! elastic scaling. The highest throughput can therefore be achieved with a single collator, but
+//! this should obviously only be used for testing purposes, due to the clear lack of
+//! decentralisation and resilience. In other words, to fully utilise the cores, the following
+//! formula needs to be satisfied (see the worked example after this list):
+//! `2 * authorship duration + network overheads <= slot time`. For example, you can use 2 cores
+//! with an authorship time of 1.3 seconds per block, which leaves 400ms for networking overhead.
+//! This would allow for 2.6 seconds of execution, compared to the 2 seconds that async backing
+//! enables. If the block authoring duration is low and you use elastic scaling to achieve lower
+//! latency or higher storage throughput rather than more compute, this is not a problem.
+//! Developments required for streamlining block production are tracked by
+//! [this issue](https://github.com/paritytech/polkadot-sdk/issues/5190).
+//! 3. **Lack of out-of-the-box automated scaling.** For true elasticity, the parachain must be able
+//! to seamlessly acquire or sell coretime as user demand grows and shrinks over time, in an
+//! automated manner. This is currently lacking: a parachain can only scale up or down by
+//! implementing some off-chain solution for managing its coretime resources.
+//! This is not in the scope of the relay chain functionality. Parachains can already start
+//! implementing such autoscaling, but we aim to provide a framework/examples for developing
+//! autoscaling strategies.
+//! Tracked by [this issue](https://github.com/paritytech/polkadot-sdk/issues/1487).
+//! An in-progress external implementation by RegionX can be found [here](https://github.com/RegionX-Labs/On-Demand).
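+//!
+//! As a quick worked example of the throughput formula above (illustrative numbers only; the
+//! constant names are just for this example and nothing here needs to be added to your node or
+//! runtime):
+//! ```ignore
+//! // Relay chain slot time, in milliseconds.
+//! const RELAY_SLOT_MS: u64 = 6_000;
+//!
+//! // Targeting 2 cores means a parachain slot of 6_000 / 2 = 3_000 ms.
+//! const CORES: u64 = 2;
+//! const PARA_SLOT_MS: u64 = RELAY_SLOT_MS / CORES;
+//!
+//! // With 1_300 ms of authorship time and ~400 ms of network overhead:
+//! // 2 * 1_300 + 400 = 3_000 <= 3_000, so both cores can be fully utilised.
+//! const AUTHORSHIP_MS: u64 = 1_300;
+//! const NETWORK_OVERHEAD_MS: u64 = 400;
+//! const _: () = assert!(2 * AUTHORSHIP_MS + NETWORK_OVERHEAD_MS <= PARA_SLOT_MS);
+//! ```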
+//!
+//! Another important constraint is that when a parachain forks, the throughput decreases and
+//! latency increases because the number of blocks backed per relay chain block goes down.
+//!
+//! ## Using elastic scaling
+//!
+//! [Here](https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_12cores.rs)
+//! is a zombienet test that exercises 500ms parachain blocks. You can modify it to test your own
+//! parachain after going through the required phases below.
+//!
+//! ### Prerequisites
+//!
+//! - Ensure Asynchronous Backing (6-second blocks) has been enabled on the parachain using
+//! [`crate::guides::async_backing_guide`].
+//! - Ensure the `AsyncBackingParams.max_candidate_depth` value is configured to at least double
+//! the maximum targeted parachain velocity. For example, if the parachain will build
+//! at most 3 candidates per relay chain block, the `max_candidate_depth` should be at least 6.
+//! - Ensure enough coretime is assigned to the parachain.
+//! - Ensure the `CandidateReceiptV2` node feature is enabled on the relay chain configuration (node
+//! feature bit number 3).
+//!
+//! Phase 1 is NOT needed if using the `polkadot-parachain` or
+//! `polkadot-omni-node` binary, or `polkadot-omni-node-lib` built from the
+//! latest polkadot-sdk release! Simply pass the `--experimental-use-slot-based`
+//! ([`polkadot_omni_node_lib::cli::Cli::experimental_use_slot_based`]) parameter to the command
+//! line and jump to Phase 2.
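+//!
+//! For example (an illustrative invocation; the chain spec path and any additional flags depend on
+//! your deployment):
+//! ```text
+//! polkadot-omni-node --chain ./your-parachain-spec.json --experimental-use-slot-based
+//! ```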
+//!
+//! ### Phase 1 - (For custom parachain node) Update Parachain Node
+//!
+//! This assumes you are using
+//! [the latest parachain template](https://github.com/paritytech/polkadot-sdk/tree/master/templates/parachain).
+//!
+//! This phase consists of plugging in the new slot-based collator, which is a requirement for
+//! elastic scaling.
+//!
+//! 1. In `node/src/service.rs`, import the slot-based collator instead of the lookahead collator,
+//! as well as `SlotBasedBlockImport` and `SlotBasedBlockImportHandle`.
+#![doc = docify::embed!("../../cumulus/polkadot-omni-node/lib/src/nodes/aura.rs", slot_based_colator_import)]
+//!
+//! 2. Modify the `ParachainBlockImport` and `Service` type definitions:
+//! ```ignore
+//! type ParachainBlockImport = TParachainBlockImport<
+//! Block,
+//! SlotBasedBlockImport<Block, Arc<ParachainClient>, ParachainClient>,
+//! ParachainBackend,
+//! >;
+//! ```
+//!
+//! ```ignore
+//! pub type Service = PartialComponents<
+//! ParachainClient,
+//! ParachainBackend,
+//! (),
+//! sc_consensus::DefaultImportQueue<Block>,
+//! sc_transaction_pool::TransactionPoolHandle<Block, ParachainClient>,
+//! (
+//! ParachainBlockImport,
+//! SlotBasedBlockImportHandle<Block>,
+//! Option<Telemetry>,
+//! Option<TelemetryWorkerHandle>,
+//! ),
+//! >;
+//! ```
+//!
+//! 3. In `new_partial()`:
+//! - Instantiate the `SlotBasedBlockImport` and pass the returned `block_import` value to
+//! `ParachainBlockImport::new` and the returned `slot_based_handle` to the `other` field of
+//! the `PartialComponents` struct.
+//!
+//! ```ignore
+//! let (block_import, slot_based_handle) = SlotBasedBlockImport::new(
+//! client.clone(),
+//! client.clone()
+//! );
+//! let block_import = ParachainBlockImport::new(block_import.clone(), backend.clone());
+//! ```
+//!
+//! ```ignore
+//! Ok(PartialComponents {
+//! backend,
+//! client,
+//! import_queue,
+//! keystore_container,
+//! task_manager,
+//! transaction_pool,
+//! select_chain: (),
+//! other: (block_import, slot_based_handle, telemetry, telemetry_worker_handle),
+//! })
+//! ```
+//!
+//! 4. In `start_consensus()`:
+//! - Remove the `overseer_handle` and `relay_chain_slot_duration` params (also remove the
+//! `OverseerHandle` type import if it’s not used elsewhere).
+//! - Add a new parameter for the block import handle:
+//! `block_import_handle: SlotBasedBlockImportHandle<Block>`
+//! - Rename `AuraParams` to `SlotBasedParams`, remove the `overseer_handle` and
+//! `relay_chain_slot_duration` fields and add a `slot_drift` field with a value of
+//! `Duration::from_secs(1)`. Also add a `spawner` field initialized to
+//! `task_manager.spawn_handle()` and pass in the `block_import_handle` param.
+//! - (Optional): You may need to customise the `authoring_duration` field of `SlotBasedParams`
+//! if using more than 3 cores. The authoring duration generally needs to be equal to the
+//! parachain slot duration.
+//! - Replace the `aura::run` call with `slot_based::run` and remove the explicit task
+//! spawn:
+#![doc = docify::embed!("../../cumulus/polkadot-omni-node/lib/src/nodes/aura.rs", launch_slot_based_collator)]
+//!
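+//! Putting the points above together, a sketch of how the params construction in
+//! `start_consensus()` might end up looking, based on the parachain template's `service.rs`
+//! (fields not affected by this guide are elided and remain as in the lookahead setup):
+//!
+//! ```ignore
+//! let params = SlotBasedParams {
+//! create_inherent_data_providers: move |_, ()| async move { Ok(()) },
+//! block_import,
+//! para_client: client.clone(),
+//! // ...other fields unchanged from the lookahead setup...
+//! keystore,
+//! collator_key,
+//! para_id,
+//! slot_drift: Duration::from_secs(1),
+//! proposer,
+//! collator_service,
+//! authoring_duration: Duration::from_millis(2000),
+//! reinitialize: false,
+//! spawner: task_manager.spawn_handle(),
+//! block_import_handle,
+//! };
+//! // `slot_based::run` is then called with these params, as shown in the snippet above,
+//! // without an explicit `task_manager.spawn_essential_handle().spawn(...)` call.
+//! ```
+//!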
+//! 5. In `start_parachain_node()`, destructure `slot_based_handle` from `params.other`. Remove the
+//! `overseer_handle` and `relay_chain_slot_duration` params passed to `start_consensus` and pass
+//! in the `slot_based_handle` instead, as sketched below.
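+//!
+//! A sketch of the corresponding changes in `start_parachain_node()`, again based on the
+//! template's `service.rs` (unchanged arguments elided):
+//! ```ignore
+//! let (block_import, slot_based_handle, mut telemetry, telemetry_worker_handle) = params.other;
+//! // ...
+//! start_consensus(
+//! // ...unchanged arguments...
+//! announce_block,
+//! slot_based_handle,
+//! )?;
+//! ```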
+//!
+//! ### Phase 2 - Configure core selection policy in the parachain runtime
+//!
+//! [RFC-103](https://polkadot-fellows.github.io/RFCs/approved/0103-introduce-core-index-commitment.html) enables
+//! parachain runtimes to constrain the execution of each block to a specified core. This brings
+//! better security and liveness guarantees and is mandatory for launching elastic scaling in
+//! production. More details are described in the RFC. To make use of this feature, the
+//! `SelectCore` trait needs to be implemented.
+#![doc = docify::embed!("../../cumulus/pallets/parachain-system/src/lib.rs", SelectCore)]
+//!
+//! For the vast majority of use cases, you will not need to implement a custom core
+//! selector. There are two pre-defined core selection policies to choose from:
+//! `DefaultCoreSelector` and `LookaheadCoreSelector`.
+//!
+//! - The `DefaultCoreSelector` implements a round-robin selection on the cores that can be
+//! occupied by the parachain at the very next relay parent. This is equivalent to what all
+//! parachains on production networks have been using so far.
+//!
+//! - The `LookaheadCoreSelector` also does a round-robin on the assigned cores, but not on those
+//! that can be occupied at the very next relay parent. Instead, it uses the ones after that. In
+//! other words, the collator gets more time to build and advertise a collation for an assignment.
+//! This makes no difference in practice if the parachain is continuously scheduled on its cores.
+//! This policy is especially desirable for parachains that share a core or use on-demand coretime.
+//!
+//! In your `runtime/src/lib.rs`, define a `SelectCore` type and use it to set the pallet's
+//! `SelectCore` config type (replacing the existing value with the chosen policy):
+#![doc = docify::embed!("../../templates/parachain/runtime/src/lib.rs", default_select_core)]
+//! ```ignore
+//! impl cumulus_pallet_parachain_system::Config for Runtime {
+//! ...
+//! type SelectCore = SelectCore;
+//! ...
+//! }
+//! ```
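+//!
+//! If the parachain shares a core or uses on-demand coretime, the lookahead policy can be chosen
+//! instead. A sketch, assuming `LookaheadCoreSelector` takes the runtime as its type parameter,
+//! mirroring `DefaultCoreSelector`:
+//! ```ignore
+//! /// Core selection policy
+//! type SelectCore = cumulus_pallet_parachain_system::LookaheadCoreSelector<Runtime>;
+//! ```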
+//!
+//! Next, we need to implement the `GetCoreSelectorApi` runtime API. In the `impl_runtime_apis` block
+//! for your runtime, add the following code:
+//!
+//! ```ignore
+//! impl cumulus_primitives_core::GetCoreSelectorApi<Block> for Runtime {
+//! fn core_selector() -> (cumulus_primitives_core::CoreSelector, cumulus_primitives_core::ClaimQueueOffset) {
+//! ParachainSystem::core_selector()
+//! }
+//! }
+//! ```
+//!
+//! ### Phase 3 - Configure maximum scaling factor in the runtime
+//!
+//! *A sample test parachain runtime which has compile-time features for configuring elastic scaling
+//! can be found [here](https://github.com/paritytech/polkadot-sdk/blob/master/cumulus/test/runtime/src/lib.rs)*
+//!
+//! First of all, you need to decide the upper limit on how many parachain blocks you need to
+//! produce per relay chain block (in direct correlation with the number of acquired cores).
+//! This is called the parachain velocity.
+//!
+//! If you configure a velocity which is different from the number of assigned
+//! cores, the measured velocity in practice will be the minimum of these two. However, be mindful
+//! that if the velocity is higher than the number of assigned cores, it's possible that
+//! only a subset of the collator set will be authoring blocks.
+//!
+//! The chosen velocity will also be used to compute:
+//! - The slot duration, by dividing the 6000 ms relay chain slot duration by the velocity.
+//! - The unincluded segment capacity, by multiplying the velocity by 2 and adding 1 to it.
+//!
+//! Let’s assume a desired maximum velocity of 3 parachain blocks per relay chain block. The needed
+//! changes would all be done in `runtime/src/lib.rs`:
+//!
+//! 1. Rename `BLOCK_PROCESSING_VELOCITY` to `MAX_BLOCK_PROCESSING_VELOCITY` and increase it to the
+//! desired value. In this example, 3.
+//!
+//! ```ignore
+//! const MAX_BLOCK_PROCESSING_VELOCITY: u32 = 3;
+//! ```
+//!
+//! 2. Set the `MILLI_SECS_PER_BLOCK` to the desired value.
+//!
+//! ```ignore
+//! const MILLI_SECS_PER_BLOCK: u32 =
+//! RELAY_CHAIN_SLOT_DURATION_MILLIS / MAX_BLOCK_PROCESSING_VELOCITY;
+//! ```
+//! Note: for a parachain which measures time in terms of its own block number, changing block
+//! time may cause complications, requiring additional changes. See here for more information:
+//! [`crate::guides::async_backing_guide#timing-by-block-number`].
+//!
+//! 3. Increase the `UNINCLUDED_SEGMENT_CAPACITY` to the desired value.
+//!
+//! ```ignore
+//! const UNINCLUDED_SEGMENT_CAPACITY: u32 = 2 * MAX_BLOCK_PROCESSING_VELOCITY + 1;
+//! ```
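+//!
+//! For the example velocity of 3, the constants above evaluate as follows (a quick sanity check of
+//! the formulas, not additional code to add):
+//!
+//! ```ignore
+//! const MAX_BLOCK_PROCESSING_VELOCITY: u32 = 3;
+//! // 6000 / 3 = 2000 ms, i.e. 2-second parachain blocks.
+//! const MILLI_SECS_PER_BLOCK: u32 =
+//! RELAY_CHAIN_SLOT_DURATION_MILLIS / MAX_BLOCK_PROCESSING_VELOCITY;
+//! // 2 * 3 + 1 = 7 unincluded parachain blocks may be in flight.
+//! const UNINCLUDED_SEGMENT_CAPACITY: u32 = 2 * MAX_BLOCK_PROCESSING_VELOCITY + 1;
+//! ```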
diff --git a/docs/sdk/src/guides/enable_elastic_scaling_mvp.rs b/docs/sdk/src/guides/enable_elastic_scaling_mvp.rs
deleted file mode 100644
index 2339088abed46..0000000000000
--- a/docs/sdk/src/guides/enable_elastic_scaling_mvp.rs
+++ /dev/null
@@ -1,144 +0,0 @@
-//! # Enable elastic scaling MVP for a parachain
-//!
-//! This guide assumes full familiarity with Asynchronous Backing and its
-//! terminology, as defined in the Polkadot Wiki.
-//! Furthermore, the parachain should have already been upgraded according to the guide.
-//!
-//! ## Quick introduction to elastic scaling
-//!
-//! [Elastic scaling](https://polkadot.network/blog/elastic-scaling-streamling-growth-on-polkadot)
-//! is a feature that will enable parachains to seamlessly scale up/down the number of used cores.
-//! This can be desirable in order to increase the compute or storage throughput of a parachain or
-//! to lower the latency between a transaction being submitted and it getting built in a parachain
-//! block.
-//!
-//! At present, with Asynchronous Backing enabled, a parachain can only include a block on the relay
-//! chain every 6 seconds, irregardless of how many cores the parachain acquires. Elastic scaling
-//! builds further on the 10x throughput increase of Async Backing, enabling collators to submit up
-//! to 3 parachain blocks per relay chain block, resulting in a further 3x throughput increase.
-//!
-//! ## Current limitations of the MVP
-//!
-//! The full implementation of elastic scaling spans across the entire relay/parachain stack and is
-//! still [work in progress](https://github.com/paritytech/polkadot-sdk/issues/1829).
-//! The MVP is still considered experimental software, so stability is not guaranteed.
-//! If you encounter any problems,
-//! [please open an issue](https://github.com/paritytech/polkadot-sdk/issues).
-//! Below are described the current limitations of the MVP:
-//!
-//! 1. **Limited core count**. Parachain block authoring is sequential, so the second block will
-//! start being built only after the previous block is imported. The current block production is
-//! capped at 2 seconds of execution. Therefore, assuming the full 2 seconds are used, a
-//! parachain can only utilise at most 3 cores in a relay chain slot of 6 seconds. If the full
-//! execution time is not being used, higher core counts can be achieved.
-//! 2. **Single collator requirement for consistently scaling beyond a core at full authorship
-//! duration of 2 seconds per block.** Using the current implementation with multiple collators
-//! adds additional latency to the block production pipeline. Assuming block execution takes
-//! about the same as authorship, the additional overhead is equal the duration of the authorship
-//! plus the block announcement. Each collator must first import the previous block before
-//! authoring a new one, so it is clear that the highest throughput can be achieved using a
-//! single collator. Experiments show that the peak performance using more than one collator
-//! (measured up to 10 collators) is utilising 2 cores with authorship time of 1.3 seconds per
-//! block, which leaves 400ms for networking overhead. This would allow for 2.6 seconds of
-//! execution, compared to the 2 seconds async backing enabled.
-//! [More experiments](https://github.com/paritytech/polkadot-sdk/issues/4696) are being
-//! conducted in this space.
-//! 3. **Trusted collator set.** The collator set needs to be trusted until there’s a mitigation
-//! that would prevent or deter multiple collators from submitting the same collation to multiple
-//! backing groups. A solution is being discussed
-//! [here](https://github.com/polkadot-fellows/RFCs/issues/92).
-//! 4. **Fixed scaling.** For true elasticity, the parachain must be able to seamlessly acquire or
-//! sell coretime as the user demand grows and shrinks over time, in an automated manner. This is
-//! currently lacking - a parachain can only scale up or down by “manually” acquiring coretime.
-//! This is not in the scope of the relay chain functionality. Parachains can already start
-//! implementing such autoscaling, but we aim to provide a framework/examples for developing
-//! autoscaling strategies.
-//!
-//! Another hard limitation that is not envisioned to ever be lifted is that parachains which create
-//! forks will generally not be able to utilise the full number of cores they acquire.
-//!
-//! ## Using elastic scaling MVP
-//!
-//! ### Prerequisites
-//!
-//! - Ensure Asynchronous Backing is enabled on the network and you have enabled it on the parachain
-//! using [`crate::guides::async_backing_guide`].
-//! - Ensure the `AsyncBackingParams.max_candidate_depth` value is configured to a value that is at
-//! least double the maximum targeted parachain velocity. For example, if the parachain will build
-//! at most 3 candidates per relay chain block, the `max_candidate_depth` should be at least 6.
-//! - Use a trusted single collator for maximum throughput.
-//! - Ensure enough coretime is assigned to the parachain. For maximum throughput the upper bound is
-//! 3 cores.
-//!
-//! Phase 1 is NOT needed if using the polkadot-parachain or
-//! polkadot-omni-node binary, or polkadot-omni-node-lib built from the
-//! latest polkadot-sdk release! Simply pass the --experimental-use-slot-based
-//! ([`polkadot_omni_node_lib::cli::Cli::experimental_use_slot_based`]) parameter to the command
-//! line and jump to Phase 2.
-//!
-//! The following steps assume using the cumulus parachain template.
-//!
-//! ### Phase 1 - (For custom parachain node) Update Parachain Node
-//!
-//! This assumes you are using
-//! [the latest parachain template](https://github.com/paritytech/polkadot-sdk/tree/master/templates/parachain).
-//!
-//! This phase consists of plugging in the new slot-based collator.
-//!
-//! 1. In `node/src/service.rs` import the slot based collator instead of the lookahead collator.
-#![doc = docify::embed!("../../cumulus/polkadot-omni-node/lib/src/nodes/aura.rs", slot_based_colator_import)]
-//!
-//! 2. In `start_consensus()`
-//! - Remove the `overseer_handle` param (also remove the
-//! `OverseerHandle` type import if it’s not used elsewhere).
-//! - Rename `AuraParams` to `SlotBasedParams`, remove the `overseer_handle` field and add a
-//! `slot_drift` field with a value of `Duration::from_secs(1)`.
-//! - Replace the single future returned by `aura::run` with the two futures returned by it and
-//! spawn them as separate tasks:
-#![doc = docify::embed!("../../cumulus/polkadot-omni-node/lib/src/nodes/aura.rs", launch_slot_based_collator)]
-//!
-//! 3. In `start_parachain_node()` remove the `overseer_handle` param passed to `start_consensus`.
-//!
-//! ### Phase 2 - Activate fixed factor scaling in the runtime
-//!
-//! This phase consists of a couple of changes needed to be made to the parachain’s runtime in order
-//! to utilise fixed factor scaling.
-//!
-//! First of all, you need to decide the upper limit to how many parachain blocks you need to
-//! produce per relay chain block (in direct correlation with the number of acquired cores). This
-//! should be either 1 (no scaling), 2 or 3. This is called the parachain velocity.
-//!
-//! If you configure a velocity which is different from the number of assigned cores, the measured
-//! velocity in practice will be the minimum of these two.
-//!
-//! The chosen velocity will also be used to compute:
-//! - The slot duration, by dividing the 6000 ms duration of the relay chain slot duration by the
-//! velocity.
-//! - The unincluded segment capacity, by multiplying the velocity with 2 and adding 1 to
-//! it.
-//!
-//! Let’s assume a desired maximum velocity of 3 parachain blocks per relay chain block. The needed
-//! changes would all be done in `runtime/src/lib.rs`:
-//!
-//! 1. Rename `BLOCK_PROCESSING_VELOCITY` to `MAX_BLOCK_PROCESSING_VELOCITY` and increase it to the
-//! desired value. In this example, 3.
-//!
-//! ```ignore
-//! const MAX_BLOCK_PROCESSING_VELOCITY: u32 = 3;
-//! ```
-//!
-//! 2. Set the `MILLISECS_PER_BLOCK` to the desired value.
-//!
-//! ```ignore
-//! const MILLISECS_PER_BLOCK: u32 =
-//! RELAY_CHAIN_SLOT_DURATION_MILLIS / MAX_BLOCK_PROCESSING_VELOCITY;
-//! ```
-//! Note: for a parachain which measures time in terms of its own block number, changing block
-//! time may cause complications, requiring additional changes. See here more information:
-//! [`crate::guides::async_backing_guide#timing-by-block-number`].
-//!
-//! 3. Increase the `UNINCLUDED_SEGMENT_CAPACITY` to the desired value.
-//!
-//! ```ignore
-//! const UNINCLUDED_SEGMENT_CAPACITY: u32 = 2 * MAX_BLOCK_PROCESSING_VELOCITY + 1;
-//! ```
diff --git a/docs/sdk/src/guides/mod.rs b/docs/sdk/src/guides/mod.rs
index 747128a728d0a..adcba1212bbb8 100644
--- a/docs/sdk/src/guides/mod.rs
+++ b/docs/sdk/src/guides/mod.rs
@@ -43,5 +43,5 @@ pub mod async_backing_guide;
/// How to enable metadata hash verification in the runtime.
pub mod enable_metadata_hash;
-/// How to enable elastic scaling MVP on a parachain.
-pub mod enable_elastic_scaling_mvp;
+/// How to enable elastic scaling on a parachain.
+pub mod enable_elastic_scaling;
diff --git a/prdoc/pr_6739.prdoc b/prdoc/pr_6739.prdoc
new file mode 100644
index 0000000000000..57f7f11d337df
--- /dev/null
+++ b/prdoc/pr_6739.prdoc
@@ -0,0 +1,12 @@
+title: Elastic scaling documentation updates
+
+doc:
+ - audience: [Node Dev, Runtime Dev]
+ description: |
+ "Updates the elastic scaling guide for parachains, taking into consideration the completed implementation of
+ [RFC-103](https://github.com/polkadot-fellows/RFCs/pull/103), which enables an untrusted collator set for
+ elastic scaling. Adds the necessary instructions for configuring the parachain so that it can leverage this implementation."
+
+crates:
+ - name: cumulus-pallet-parachain-system
+ bump: none
diff --git a/templates/parachain/node/src/service.rs b/templates/parachain/node/src/service.rs
index 8c526317283ea..cba6c5d644546 100644
--- a/templates/parachain/node/src/service.rs
+++ b/templates/parachain/node/src/service.rs
@@ -15,7 +15,9 @@ use polkadot_sdk::*;
use cumulus_client_cli::CollatorOptions;
use cumulus_client_collator::service::CollatorService;
#[docify::export(lookahead_collator)]
-use cumulus_client_consensus_aura::collators::lookahead::{self as aura, Params as AuraParams};
+use cumulus_client_consensus_aura::collators::slot_based::{
+ self as slot_based, Params as SlotBasedParams, SlotBasedBlockImport, SlotBasedBlockImportHandle,
+};
use cumulus_client_consensus_common::ParachainBlockImport as TParachainBlockImport;
use cumulus_client_consensus_proposer::Proposer;
use cumulus_client_service::{
@@ -28,7 +30,7 @@ use cumulus_primitives_core::{
relay_chain::{CollatorPair, ValidationCode},
ParaId,
};
-use cumulus_relay_chain_interface::{OverseerHandle, RelayChainInterface};
+use cumulus_relay_chain_interface::RelayChainInterface;
// Substrate Imports
use frame_benchmarking_cli::SUBSTRATE_REFERENCE_HARDWARE;
@@ -49,7 +51,11 @@ type ParachainClient = TFullClient;
type ParachainBackend = TFullBackend<Block>;
-type ParachainBlockImport = TParachainBlockImport<Block, Arc<ParachainClient>, ParachainBackend>;
+type ParachainBlockImport = TParachainBlockImport<
+ Block,
+ SlotBasedBlockImport<Block, Arc<ParachainClient>, ParachainClient>,
+ ParachainBackend,
+>;
/// Assembly of PartialComponents (enough to run chain ops subcommands)
pub type Service = PartialComponents<
@@ -58,7 +64,12 @@ pub type Service = PartialComponents<
(),
sc_consensus::DefaultImportQueue<Block>,
sc_transaction_pool::TransactionPoolHandle<Block, ParachainClient>,
- (ParachainBlockImport, Option<Telemetry>, Option<TelemetryWorkerHandle>),
+ (
+ ParachainBlockImport,
+ SlotBasedBlockImportHandle<Block>,
+ Option<Telemetry>,
+ Option<TelemetryWorkerHandle>,
+ ),
>;
/// Starts a `ServiceBuilder` for a full service.
@@ -118,7 +129,9 @@ pub fn new_partial(config: &Configuration) -> Result
.build(),
);
- let block_import = ParachainBlockImport::new(client.clone(), backend.clone());
+ let (block_import, slot_based_handle) =
+ SlotBasedBlockImport::new(client.clone(), client.clone());
+ let block_import = ParachainBlockImport::new(block_import.clone(), backend.clone());
let import_queue = build_import_queue(
client.clone(),
@@ -136,7 +149,7 @@ pub fn new_partial(config: &Configuration) -> Result
task_manager,
transaction_pool,
select_chain: (),
- other: (block_import, telemetry, telemetry_worker_handle),
+ other: (block_import, slot_based_handle, telemetry, telemetry_worker_handle),
})
}
@@ -178,11 +191,10 @@ fn start_consensus(
relay_chain_interface: Arc<dyn RelayChainInterface>,
transaction_pool: Arc<sc_transaction_pool::TransactionPoolHandle<Block, ParachainClient>>,
keystore: KeystorePtr,
- relay_chain_slot_duration: Duration,
para_id: ParaId,
collator_key: CollatorPair,
- overseer_handle: OverseerHandle,
announce_block: Arc<dyn Fn(Hash, Option<Vec<u8>>) + Send + Sync>,
+ block_import_handle: SlotBasedBlockImportHandle<Block>,
) -> Result<(), sc_service::Error> {
let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording(
task_manager.spawn_handle(),
@@ -201,7 +213,7 @@ fn start_consensus(
client.clone(),
);
- let params = AuraParams {
+ let params = SlotBasedParams {
create_inherent_data_providers: move |_, ()| async move { Ok(()) },
block_import,
para_client: client.clone(),
@@ -213,17 +225,17 @@ fn start_consensus(
keystore,
collator_key,
para_id,
- overseer_handle,
- relay_chain_slot_duration,
+ slot_drift: Duration::from_secs(1),
proposer,
collator_service,
authoring_duration: Duration::from_millis(2000),
reinitialize: false,
+ spawner: task_manager.spawn_handle(),
+ block_import_handle,
};
- let fut = aura::run::<Block, sp_consensus_aura::sr25519::AuthorityPair, _, _, _, _, _, _, _, _>(
+ slot_based::run::<Block, sp_consensus_aura::sr25519::AuthorityPair, _, _, _, _, _, _, _, _, _>(
params,
);
- task_manager.spawn_essential_handle().spawn("aura", None, fut);
Ok(())
}
@@ -240,7 +252,7 @@ pub async fn start_parachain_node(
let parachain_config = prepare_node_config(parachain_config);
let params = new_partial(&parachain_config)?;
- let (block_import, mut telemetry, telemetry_worker_handle) = params.other;
+ let (block_import, slot_based_handle, mut telemetry, telemetry_worker_handle) = params.other;
let prometheus_registry = parachain_config.prometheus_registry().cloned();
let net_config = sc_network::config::FullNetworkConfiguration::<
@@ -398,11 +410,10 @@ pub async fn start_parachain_node(
relay_chain_interface,
transaction_pool,
params.keystore_container.keystore(),
- relay_chain_slot_duration,
para_id,
collator_key.expect("Command line arguments do not allow this. qed"),
- overseer_handle,
announce_block,
+ slot_based_handle,
)?;
}
diff --git a/templates/parachain/runtime/src/apis.rs b/templates/parachain/runtime/src/apis.rs
index d7da43b86af16..2551fb1bf8dfb 100644
--- a/templates/parachain/runtime/src/apis.rs
+++ b/templates/parachain/runtime/src/apis.rs
@@ -86,6 +86,12 @@ impl_runtime_apis! {
}
}
+ impl cumulus_primitives_core::GetCoreSelectorApi<Block> for Runtime {
+ fn core_selector() -> (cumulus_primitives_core::CoreSelector, cumulus_primitives_core::ClaimQueueOffset) {
+ ParachainSystem::core_selector()
+ }
+ }
+
impl sp_api::Core for Runtime {
fn version() -> RuntimeVersion {
VERSION
diff --git a/templates/parachain/runtime/src/configs/mod.rs b/templates/parachain/runtime/src/configs/mod.rs
index 1e9155f59a57a..253205d1d5ab8 100644
--- a/templates/parachain/runtime/src/configs/mod.rs
+++ b/templates/parachain/runtime/src/configs/mod.rs
@@ -61,9 +61,9 @@ use super::{
weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight},
AccountId, Aura, Balance, Balances, Block, BlockNumber, CollatorSelection, ConsensusHook, Hash,
MessageQueue, Nonce, PalletInfo, ParachainSystem, Runtime, RuntimeCall, RuntimeEvent,
- RuntimeFreezeReason, RuntimeHoldReason, RuntimeOrigin, RuntimeTask, Session, SessionKeys,
- System, WeightToFee, XcmpQueue, AVERAGE_ON_INITIALIZE_RATIO, EXISTENTIAL_DEPOSIT, HOURS,
- MAXIMUM_BLOCK_WEIGHT, MICRO_UNIT, NORMAL_DISPATCH_RATIO, SLOT_DURATION, VERSION,
+ RuntimeFreezeReason, RuntimeHoldReason, RuntimeOrigin, RuntimeTask, SelectCore, Session,
+ SessionKeys, System, WeightToFee, XcmpQueue, AVERAGE_ON_INITIALIZE_RATIO, EXISTENTIAL_DEPOSIT,
+ HOURS, MAXIMUM_BLOCK_WEIGHT, MICRO_UNIT, NORMAL_DISPATCH_RATIO, SLOT_DURATION, VERSION,
};
use xcm_config::{RelayLocation, XcmOriginToTransactDispatchOrigin};
@@ -209,7 +209,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime {
type ReservedXcmpWeight = ReservedXcmpWeight;
type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases;
type ConsensusHook = ConsensusHook;
- type SelectCore = cumulus_pallet_parachain_system::DefaultCoreSelector<Runtime>;
+ type SelectCore = SelectCore;
}
impl parachain_info::Config for Runtime {}
diff --git a/templates/parachain/runtime/src/lib.rs b/templates/parachain/runtime/src/lib.rs
index f312e9f80192f..9c9f8d9f40cb8 100644
--- a/templates/parachain/runtime/src/lib.rs
+++ b/templates/parachain/runtime/src/lib.rs
@@ -243,6 +243,10 @@ type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook<
UNINCLUDED_SEGMENT_CAPACITY,
>;
+#[docify::export(default_select_core)]
+/// Core selection policy
+type SelectCore = cumulus_pallet_parachain_system::DefaultCoreSelector<Runtime>;
+
/// The version information used to identify this runtime when compiled natively.
#[cfg(feature = "std")]
pub fn native_version() -> NativeVersion {