# Move cumulus zombienet tests to aura & async backing (#3568)
The Cumulus test-parachain node and test runtime were still using relay
chain consensus and 12-second block times. With async backing around the
corner on the major chains, we should switch our tests too.

This is also needed to nicely test the changes coming to collators in #3168.

### Changes Overview
- Followed the [migration guide](https://wiki.polkadot.network/docs/maintain-guides-async-backing) for async backing for the `cumulus-test-runtime`
- Adjusted the `cumulus-test-service` to use the correct import queue, lookahead collator, etc.
- The block validation function now uses the Aura Ext Executor, so the seal of the block is validated
- The previous point requires that we seal the block before calling into `validate_block`; I introduced a helper function for that (see the sketch after this list)
- Adjusted the test client to provide a slot to the relay chain proof and the Aura pre-digest
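
To make the seal-then-validate requirement concrete, here is a minimal, self-contained sketch of the idea. All types and helpers below (`Header`, `DigestItem`, `seal_block`, `validate_block`) are hypothetical stand-ins for illustration, not the real cumulus-test-client API: the seal is the last digest item of the header, so a block must be sealed before validation, and validation pops the seal off and verifies it against the rest of the header.

```rust
// Hypothetical stand-ins; the real types live in sp-runtime/cumulus.
#[derive(Clone, Debug)]
enum DigestItem {
    AuraPreDigest { slot: u64 },
    Seal(Vec<u8>),
}

#[derive(Clone, Debug)]
struct Header {
    number: u64,
    digest: Vec<DigestItem>,
}

// Sketch of a sealing helper: sign the unsealed header and attach the
// signature as the final `Seal` digest item.
fn seal_block(mut header: Header, sign: impl Fn(&Header) -> Vec<u8>) -> Header {
    let signature = sign(&header);
    header.digest.push(DigestItem::Seal(signature));
    header
}

// Aura-Ext-Executor-style check: the seal must be the last digest item.
// It is popped off and verified against the remaining (unsealed) header.
fn validate_block(mut header: Header, verify: impl Fn(&Header, &[u8]) -> bool) -> bool {
    match header.digest.pop() {
        Some(DigestItem::Seal(sig)) => verify(&header, &sig),
        _ => false, // unsealed blocks are rejected
    }
}

fn main() {
    // Toy "signature" over the block number; real code signs with the
    // block author's key.
    let sign = |h: &Header| vec![(h.number % 251) as u8];
    let verify = |h: &Header, sig: &[u8]| sig == [(h.number % 251) as u8].as_slice();

    let unsealed = Header { number: 1, digest: vec![DigestItem::AuraPreDigest { slot: 7 }] };
    assert!(!validate_block(unsealed.clone(), verify)); // rejected: no seal
    let sealed = seal_block(unsealed, sign);
    assert!(validate_block(sealed, verify)); // accepted once sealed
}
```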
skunert authored Apr 9, 2024
1 parent cb192d1 commit df818d2
Showing 21 changed files with 511 additions and 306 deletions.
#### `Cargo.lock` (22 additions & 5 deletions)

Some generated files are not rendered by default.

#### `cumulus/client/collator/src/lib.rs` (4 additions & 6 deletions)
```diff
@@ -381,13 +381,11 @@ mod tests {
     sproof.included_para_head = Some(HeadData(parent.encode()));
     sproof.para_id = cumulus_test_runtime::PARACHAIN_ID.into();
 
-    let builder = self.client.init_block_builder_at(
-        parent.hash(),
-        Some(validation_data.clone()),
-        sproof,
-    );
+    let cumulus_test_client::BlockBuilderAndSupportData { block_builder, .. } = self
+        .client
+        .init_block_builder_at(parent.hash(), Some(validation_data.clone()), sproof);
 
-    let (block, _, proof) = builder.build().expect("Creates block").into_inner();
+    let (block, _, proof) = block_builder.build().expect("Creates block").into_inner();
 
     self.client
         .import(BlockOrigin::Own, block.clone())
```
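
The call-site churn above comes from `init_block_builder_at` now returning a struct rather than the bare block builder. The tests pull the builder out with a destructuring `let` and ignore the remaining fields via `..`. A minimal self-contained sketch of that binding pattern, with hypothetical stand-in types (only the `block_builder` field name is taken from the diff):

```rust
// Stand-in for the real return type; the actual fields of
// BlockBuilderAndSupportData in cumulus-test-client may differ.
#[allow(dead_code)]
struct BlockBuilderAndSupportData {
    block_builder: BlockBuilder,
    support_data: u32, // hypothetical extra payload
}

struct BlockBuilder;

impl BlockBuilder {
    fn build(&self) -> &'static str {
        "block"
    }
}

fn init_block_builder_at() -> BlockBuilderAndSupportData {
    BlockBuilderAndSupportData { block_builder: BlockBuilder, support_data: 42 }
}

fn main() {
    // `..` in the pattern ignores every field except `block_builder`,
    // which is exactly how the updated tests consume the new return type.
    let BlockBuilderAndSupportData { block_builder, .. } = init_block_builder_at();
    assert_eq!(block_builder.build(), "block");
}
```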
#### `cumulus/client/consensus/common/src/tests.rs` (31 additions & 31 deletions)
```diff
@@ -274,15 +274,15 @@ fn build_block<B: InitBlockBuilder>(
     timestamp: Option<u64>,
     relay_parent: Option<PHash>,
 ) -> Block {
-    let builder = match at {
+    let cumulus_test_client::BlockBuilderAndSupportData { block_builder, .. } = match at {
         Some(at) => match timestamp {
             Some(ts) => builder.init_block_builder_with_timestamp(at, None, sproof, ts),
             None => builder.init_block_builder_at(at, None, sproof),
         },
         None => builder.init_block_builder(None, sproof),
     };
 
-    let mut block = builder.build().unwrap().block;
+    let mut block = block_builder.build().unwrap().block;
 
     if let Some(relay_parent) = relay_parent {
         block
@@ -503,7 +503,7 @@ fn follow_finalized_does_not_stop_on_unknown_block() {
 
     let unknown_block = {
         let sproof = sproof_with_parent_by_hash(&client, block.hash());
-        let block_builder = client.init_block_builder_at(block.hash(), None, sproof);
+        let block_builder = client.init_block_builder_at(block.hash(), None, sproof).block_builder;
         block_builder.build().unwrap().block
     };
 
@@ -553,7 +553,7 @@ fn follow_new_best_sets_best_after_it_is_imported() {
 
     let unknown_block = {
         let sproof = sproof_with_parent_by_hash(&client, block.hash());
-        let block_builder = client.init_block_builder_at(block.hash(), None, sproof);
+        let block_builder = client.init_block_builder_at(block.hash(), None, sproof).block_builder;
         block_builder.build().unwrap().block
     };
 
@@ -665,8 +665,8 @@ fn do_not_set_best_block_to_older_block() {
 fn prune_blocks_on_level_overflow() {
     // Here we are using the timestamp value to generate blocks with different hashes.
     const LEVEL_LIMIT: usize = 3;
-    const TIMESTAMP_MULTIPLIER: u64 = 60000;
 
+    let mut ts_producer = std::iter::successors(Some(0), |&x| Some(x + 6000));
     let backend = Arc::new(Backend::new_test(1000, 3));
     let client = Arc::new(TestClientBuilder::with_backend(backend.clone()).build());
     let mut para_import = ParachainBlockImport::new_with_limit(
@@ -675,13 +675,14 @@ fn prune_blocks_on_level_overflow() {
         LevelLimit::Some(LEVEL_LIMIT),
     );
 
+    let best_hash = client.chain_info().best_hash;
     let block0 = build_and_import_block_ext(
         &client,
         BlockOrigin::NetworkInitialSync,
         true,
         &mut para_import,
-        None,
-        None,
+        Some(best_hash),
+        ts_producer.next(),
         None,
     );
     let id0 = block0.header.hash();
@@ -694,22 +695,22 @@ fn prune_blocks_on_level_overflow() {
                 i == 1,
                 &mut para_import,
                 Some(id0),
-                Some(i as u64 * TIMESTAMP_MULTIPLIER),
+                ts_producer.next(),
                 None,
             )
         })
         .collect::<Vec<_>>();
     let id10 = blocks1[0].header.hash();
 
     let blocks2 = (0..2)
-        .map(|i| {
+        .map(|_| {
             build_and_import_block_ext(
                 &client,
                 BlockOrigin::Own,
                 false,
                 &mut para_import,
                 Some(id10),
-                Some(i as u64 * TIMESTAMP_MULTIPLIER),
+                ts_producer.next(),
                 None,
             )
         })
@@ -738,7 +739,7 @@ fn prune_blocks_on_level_overflow() {
         false,
         &mut para_import,
         Some(id0),
-        Some(LEVEL_LIMIT as u64 * TIMESTAMP_MULTIPLIER),
+        ts_producer.next(),
         None,
     );
 
@@ -758,7 +759,7 @@ fn prune_blocks_on_level_overflow() {
         false,
         &mut para_import,
         Some(id0),
-        Some(2 * LEVEL_LIMIT as u64 * TIMESTAMP_MULTIPLIER),
+        ts_producer.next(),
         None,
     );
 
@@ -779,8 +780,8 @@ fn prune_blocks_on_level_overflow() {
 fn restore_limit_monitor() {
     // Here we are using the timestamp value to generate blocks with different hashes.
     const LEVEL_LIMIT: usize = 2;
-    const TIMESTAMP_MULTIPLIER: u64 = 60000;
-
+    // Iterator that produces a new timestamp in the next slot
+    let mut ts_producer = std::iter::successors(Some(0), |&x| Some(x + 6000));
     let backend = Arc::new(Backend::new_test(1000, 3));
     let client = Arc::new(TestClientBuilder::with_backend(backend.clone()).build());
 
@@ -791,13 +792,14 @@ fn restore_limit_monitor() {
         LevelLimit::Some(usize::MAX),
     );
 
+    let best_hash = client.chain_info().best_hash;
     let block00 = build_and_import_block_ext(
         &client,
         BlockOrigin::NetworkInitialSync,
         true,
         &mut para_import,
-        None,
-        None,
+        Some(best_hash),
+        ts_producer.next(),
         None,
     );
     let id00 = block00.header.hash();
@@ -810,26 +812,24 @@ fn restore_limit_monitor() {
                 i == 1,
                 &mut para_import,
                 Some(id00),
-                Some(i as u64 * TIMESTAMP_MULTIPLIER),
+                ts_producer.next(),
                 None,
             )
         })
         .collect::<Vec<_>>();
     let id10 = blocks1[0].header.hash();
 
-    let _ = (0..LEVEL_LIMIT)
-        .map(|i| {
-            build_and_import_block_ext(
-                &client,
-                BlockOrigin::Own,
-                false,
-                &mut para_import,
-                Some(id10),
-                Some(i as u64 * TIMESTAMP_MULTIPLIER),
-                None,
-            )
-        })
-        .collect::<Vec<_>>();
+    for _ in 0..LEVEL_LIMIT {
+        build_and_import_block_ext(
+            &client,
+            BlockOrigin::Own,
+            false,
+            &mut para_import,
+            Some(id10),
+            ts_producer.next(),
+            None,
+        );
+    }
 
     // Scenario before limit application (with B11 imported as best)
     // Import order (freshness): B00, B10, B11, B12, B20, B21
@@ -860,7 +860,7 @@ fn restore_limit_monitor() {
         false,
         &mut para_import,
         Some(id00),
-        Some(LEVEL_LIMIT as u64 * TIMESTAMP_MULTIPLIER),
+        ts_producer.next(),
         None,
     );
```
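
Worth highlighting in the diff above: the fixed `TIMESTAMP_MULTIPLIER` is replaced by a `std::iter::successors` iterator that yields timestamps spaced 6000 ms apart, i.e. one 6-second Aura slot per block, so every test block lands in its own slot. A standalone sketch of the pattern (the 6000 ms spacing is taken from the diff above):

```rust
fn main() {
    // Same pattern as in the tests: each `next()` yields a timestamp one
    // 6-second slot (6000 ms) after the previous one.
    let mut ts_producer = std::iter::successors(Some(0u64), |&x| Some(x + 6000));

    assert_eq!(ts_producer.next(), Some(0));     // slot 0
    assert_eq!(ts_producer.next(), Some(6000));  // slot 1
    assert_eq!(ts_producer.next(), Some(12000)); // slot 2

    // The test helper takes an Option<u64> timestamp, and `Iterator::next`
    // conveniently returns exactly that, hence the bare `ts_producer.next()`
    // at the call sites.
    let ts: Option<u64> = ts_producer.next();
    assert_eq!(ts, Some(18000));
}
```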
#### `cumulus/pallets/parachain-system/Cargo.toml` (2 additions & 0 deletions)
```diff
@@ -59,10 +59,12 @@ sp-keyring = { path = "../../../substrate/primitives/keyring" }
 sp-crypto-hashing = { path = "../../../substrate/primitives/crypto/hashing" }
 sp-tracing = { path = "../../../substrate/primitives/tracing" }
 sp-version = { path = "../../../substrate/primitives/version" }
+sp-consensus-slots = { path = "../../../substrate/primitives/consensus/slots" }
 
 # Cumulus
 cumulus-test-client = { path = "../../test/client" }
 cumulus-test-relay-sproof-builder = { path = "../../test/relay-sproof-builder" }
+cumulus-test-runtime = { path = "../../test/runtime" }
 
 [features]
 default = ["std"]
```
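
The new `sp-consensus-slots` dev-dependency is presumably what lets the parachain-system tests construct the slot values mentioned in the commit message. Conceptually, a slot is just the timestamp divided by the slot duration; a plain-Rust sketch of that relation (using the 6-second slot duration from the tests above, not the crate's actual types):

```rust
/// The basic relation used by slot-based consensus such as Aura:
/// a slot is the timestamp (in milliseconds) divided by the slot duration.
fn slot_from_timestamp(timestamp_ms: u64, slot_duration_ms: u64) -> u64 {
    timestamp_ms / slot_duration_ms
}

fn main() {
    // With 6-second slots, timestamps 0..=5999 fall into slot 0,
    // 6000..=11999 into slot 1, and so on.
    assert_eq!(slot_from_timestamp(0, 6000), 0);
    assert_eq!(slot_from_timestamp(5_999, 6000), 0);
    assert_eq!(slot_from_timestamp(6_000, 6000), 1);
    assert_eq!(slot_from_timestamp(13_000, 6000), 2);
}
```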
