This repository has been archived by the owner on Nov 15, 2023. It is now read-only.

babe: allow skipping over empty epochs #11727

Merged · 20 commits · Dec 24, 2022
49 changes: 46 additions & 3 deletions client/consensus/babe/src/lib.rs
@@ -543,7 +543,7 @@ fn aux_storage_cleanup<C: HeaderMetadata<Block> + HeaderBackend<Block>, Block: B
let stale_forks = match client.expand_forks(&notification.stale_heads) {
Ok(stale_forks) => stale_forks,
Err((stale_forks, e)) => {
warn!(target: "babe", "{:?}", e,);
warn!(target: LOG_TARGET, "{:?}", e);
stale_forks
},
};
@@ -1511,11 +1511,12 @@ where
if let Some(next_epoch_descriptor) = next_epoch_digest {
old_epoch_changes = Some((*epoch_changes).clone());

-let viable_epoch = epoch_changes
+let mut viable_epoch = epoch_changes
.viable_epoch(&epoch_descriptor, |slot| Epoch::genesis(&self.config, slot))
.ok_or_else(|| {
ConsensusError::ClientImport(Error::<Block>::FetchEpoch(parent_hash).into())
-})?;
+})?
+.into_cloned();

let epoch_config = next_config_digest
.map(Into::into)
@@ -1528,6 +1529,48 @@ where
log::Level::Info
};

if viable_epoch.as_ref().end_slot() <= slot {
// some epochs must have been skipped as our current slot
// fits outside the current epoch. we will figure out
// which epoch it belongs to and we will re-use the same
// data for that epoch
let mut epoch_data = viable_epoch.as_mut();
let skipped_epochs =
*slot.saturating_sub(epoch_data.start_slot) / epoch_data.duration;

// NOTE: notice that we are only updating a local copy of the `Epoch`, this
// makes it so that when we insert the next epoch into `EpochChanges` below
// (after incrementing it), it will use the correct epoch index and start slot.
// we do not update the original epoch that will be re-used because there might
// be other forks (that we haven't imported) where the epoch isn't skipped, and
// to import those forks we want to keep the original epoch data. not updating
// the original epoch works because when we search the tree for which epoch to
// use for a given slot, we will search in-depth with the predicate
// `epoch.start_slot <= slot` which will still match correctly without updating
// `start_slot` to the correct value as below.
let epoch_index = epoch_data.epoch_index.checked_add(skipped_epochs).expect(
"epoch number is u64; it should be strictly smaller than number of slots; \
slots relate in some way to wall clock time; \
if u64 is not enough we should crash for safety; qed.",
);

let start_slot = skipped_epochs
.checked_mul(epoch_data.duration)
.and_then(|skipped_slots| epoch_data.start_slot.checked_add(skipped_slots))
.expect(
"slot number is u64; it should relate in some way to wall clock time; \
if u64 is not enough we should crash for safety; qed.",
);

warn!(
target: LOG_TARGET,
"👶 Epoch(s) skipped: from {} to {}", epoch_data.epoch_index, epoch_index,
);

epoch_data.epoch_index = epoch_index;
epoch_data.start_slot = Slot::from(start_slot);
}

log!(
target: LOG_TARGET,
log_level,
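For concreteness, here is a minimal, self-contained sketch of the skip arithmetic above, using plain `u64` slots instead of the `Slot` newtype and a stripped-down stand-in for the real `Epoch` type (both are hypothetical simplifications; the slot values match the epoch-length-6 setup used in the tests below):

```rust
/// Stripped-down stand-in for the real BABE `Epoch` type (hypothetical).
struct Epoch {
    epoch_index: u64,
    start_slot: u64,
    duration: u64,
}

/// Mirrors the skip computation in the diff above: count the whole epochs
/// skipped between the announced epoch and the slot being imported, then
/// advance the epoch index and start slot accordingly.
fn skip_epochs(epoch: &mut Epoch, slot: u64) {
    let skipped_epochs = slot.saturating_sub(epoch.start_slot) / epoch.duration;
    epoch.epoch_index = epoch
        .epoch_index
        .checked_add(skipped_epochs)
        .expect("epoch index fits in u64");
    epoch.start_slot = skipped_epochs
        .checked_mul(epoch.duration)
        .and_then(|skipped_slots| epoch.start_slot.checked_add(skipped_slots))
        .expect("start slot fits in u64");
}

fn main() {
    // Epoch 1 starts at slot 7 with 6-slot epochs, but the block being
    // imported sits at slot 19, i.e. in what should be epoch 3.
    let mut epoch = Epoch { epoch_index: 1, start_slot: 7, duration: 6 };
    skip_epochs(&mut epoch, 19);
    // (19 - 7) / 6 = 2 epochs skipped: index 1 -> 3, start slot 7 -> 19.
    assert_eq!((epoch.epoch_index, epoch.start_slot), (3, 19));
}
```

As the NOTE in the diff stresses, the real implementation applies this update only to a local clone obtained via `into_cloned()`, so forks that do not skip the epoch keep the original, unmodified data.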
261 changes: 261 additions & 0 deletions client/consensus/babe/src/tests.rs
@@ -28,6 +28,7 @@ use rand_chacha::{
use sc_block_builder::{BlockBuilder, BlockBuilderProvider};
use sc_client_api::{backend::TransactionFor, BlockchainEvents, Finalizer};
use sc_consensus::{BoxBlockImport, BoxJustificationImport};
+use sc_consensus_epochs::{EpochIdentifier, EpochIdentifierPosition};
use sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging;
use sc_network_test::{Block as TestBlock, *};
use sp_application_crypto::key_types::BABE;
@@ -1109,3 +1110,263 @@ async fn obsolete_blocks_aux_data_cleanup() {
// Present C4, C5
assert!(aux_data_check(&fork3_hashes, true));
}

#[tokio::test]
async fn allows_skipping_epochs() {
let mut net = BabeTestNet::new(1);

let peer = net.peer(0);
let data = peer.data.as_ref().expect("babe link set up during initialization");

let client = peer.client().as_client();
let mut block_import = data.block_import.lock().take().expect("import set up during init");

let mut proposer_factory = DummyFactory {
client: client.clone(),
config: data.link.config.clone(),
epoch_changes: data.link.epoch_changes.clone(),
mutator: Arc::new(|_, _| ()),
};

let epoch_changes = data.link.epoch_changes.clone();
let epoch_length = data.link.config.epoch_length;

// we create all of the blocks in epoch 0 as well as a block in epoch 1
let blocks = propose_and_import_blocks(
&client,
&mut proposer_factory,
&mut block_import,
client.chain_info().genesis_hash,
epoch_length as usize + 1,
)
.await;

// the first block in epoch 0 (#1) announces both epoch 0 and 1 (this is a
// special genesis epoch)
let epoch0 = epoch_changes
.shared_data()
.epoch(&EpochIdentifier {
position: EpochIdentifierPosition::Genesis0,
hash: blocks[0],
number: 1,
})
.unwrap()
.clone();

assert_eq!(epoch0.epoch_index, 0);
assert_eq!(epoch0.start_slot, Slot::from(1));

let epoch1 = epoch_changes
.shared_data()
.epoch(&EpochIdentifier {
position: EpochIdentifierPosition::Genesis1,
hash: blocks[0],
number: 1,
})
.unwrap()
.clone();

assert_eq!(epoch1.epoch_index, 1);
assert_eq!(epoch1.start_slot, Slot::from(epoch_length + 1));

// the first block in epoch 1 (#7) announces epoch 2. we will be skipping
// this epoch and therefore re-using its data for epoch 3
let epoch2 = epoch_changes
.shared_data()
.epoch(&EpochIdentifier {
position: EpochIdentifierPosition::Regular,
hash: blocks[epoch_length as usize],
number: epoch_length + 1,
})
.unwrap()
.clone();

assert_eq!(epoch2.epoch_index, 2);
assert_eq!(epoch2.start_slot, Slot::from(epoch_length * 2 + 1));

// we now author a block that belongs to epoch 3, thereby skipping epoch 2
let last_block = client.expect_header(*blocks.last().unwrap()).unwrap();
let block = propose_and_import_block(
&last_block,
Some((epoch_length * 3 + 1).into()),
&mut proposer_factory,
&mut block_import,
)
.await;

// and the first block in epoch 3 (#8) announces epoch 4
let epoch4 = epoch_changes
.shared_data()
.epoch(&EpochIdentifier {
position: EpochIdentifierPosition::Regular,
hash: block,
number: epoch_length + 2,
})
.unwrap()
.clone();

assert_eq!(epoch4.epoch_index, 4);
assert_eq!(epoch4.start_slot, Slot::from(epoch_length * 4 + 1));

// if we try to get the epoch data for a slot in epoch 3
let epoch3 = epoch_changes
.shared_data()
.epoch_data_for_child_of(
descendent_query(&*client),
&block,
epoch_length + 2,
(epoch_length * 3 + 2).into(),
|slot| Epoch::genesis(&data.link.config, slot),
)
.unwrap()
.unwrap();

// we get back the data for epoch 2
assert_eq!(epoch3, epoch2);

// but if we try to get the epoch data for a slot in epoch 4
let epoch4_ = epoch_changes
.shared_data()
.epoch_data_for_child_of(
descendent_query(&*client),
&block,
epoch_length + 2,
(epoch_length * 4 + 1).into(),
|slot| Epoch::genesis(&data.link.config, slot),
)
.unwrap()
.unwrap();

// we get epoch 4 as expected
assert_eq!(epoch4, epoch4_);
}
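
Why does a slot in epoch 3 resolve to the unmodified epoch 2 data? As the NOTE in the implementation explains, lookup matches epochs with the predicate `epoch.start_slot <= slot`. Below is a simplified sketch of that rule, flattening the real fork tree into a plain list of `(start_slot, epoch_index)` pairs and assuming the epoch-length-6 setup implied by the test comments (`#7` is the first block of epoch 1):

```rust
/// Simplified model of slot-to-epoch resolution: among the epochs announced
/// on a chain, pick the most recently started one with `start_slot <= slot`.
/// (The real implementation searches a fork tree; the flat list here is an
/// assumption made purely for illustration.)
fn epoch_for_slot(announced: &[(u64, u64)], slot: u64) -> Option<u64> {
    announced
        .iter()
        .filter(|&&(start_slot, _)| start_slot <= slot)
        .max_by_key(|&&(start_slot, _)| start_slot)
        .map(|&(_, epoch_index)| epoch_index)
}

fn main() {
    // Epochs announced on the chain built in the test above, as
    // (start_slot, epoch_index): epoch 2 keeps its original start slot 13
    // even though it is re-used for epoch 3, and epoch 4 starts at slot 25.
    let announced = [(1, 0), (7, 1), (13, 2), (25, 4)];

    // A slot inside "epoch 3" still resolves to the re-used epoch 2 data...
    assert_eq!(epoch_for_slot(&announced, 20), Some(2));
    // ...while the first slot of epoch 4 resolves to epoch 4 as expected.
    assert_eq!(epoch_for_slot(&announced, 25), Some(4));
}
```

The next test exercises the same rule per fork, and a sketch after it models that scenario: each branch carries its own set of announced epochs, so the same slot can resolve to different epoch data depending on which branch is extended.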

#[tokio::test]
async fn allows_skipping_epochs_on_some_forks() {
let mut net = BabeTestNet::new(1);

let peer = net.peer(0);
let data = peer.data.as_ref().expect("babe link set up during initialization");

let client = peer.client().as_client();
let mut block_import = data.block_import.lock().take().expect("import set up during init");

let mut proposer_factory = DummyFactory {
client: client.clone(),
config: data.link.config.clone(),
epoch_changes: data.link.epoch_changes.clone(),
mutator: Arc::new(|_, _| ()),
};

let epoch_changes = data.link.epoch_changes.clone();
let epoch_length = data.link.config.epoch_length;

// we create all of the blocks in epoch 0 as well as a block in epoch 1
let blocks = propose_and_import_blocks(
&client,
&mut proposer_factory,
&mut block_import,
client.chain_info().genesis_hash,
epoch_length as usize + 1,
)
.await;

// we now author a block that belongs to epoch 2, built on top of the last
// authored block in epoch 1.
let last_block = client.expect_header(*blocks.last().unwrap()).unwrap();

let epoch2_block = propose_and_import_block(
&last_block,
Some((epoch_length * 2 + 1).into()),
&mut proposer_factory,
&mut block_import,
)
.await;

// if we try to get the epoch data for a slot in epoch 2, we get the data that
// was previously announced when epoch 1 started
let epoch2 = epoch_changes
.shared_data()
.epoch_data_for_child_of(
descendent_query(&*client),
&epoch2_block,
epoch_length + 2,
(epoch_length * 2 + 2).into(),
|slot| Epoch::genesis(&data.link.config, slot),
)
.unwrap()
.unwrap();

// we now author a block that belongs to epoch 3, built on top of the last
// authored block in epoch 1. authoring this block means we're skipping epoch 2
// entirely on this fork
let epoch3_block = propose_and_import_block(
&last_block,
Some((epoch_length * 3 + 1).into()),
&mut proposer_factory,
&mut block_import,
)
.await;

// if we try to get the epoch data for a slot in epoch 3
let epoch3_ = epoch_changes
.shared_data()
.epoch_data_for_child_of(
descendent_query(&*client),
&epoch3_block,
epoch_length + 2,
(epoch_length * 3 + 2).into(),
|slot| Epoch::genesis(&data.link.config, slot),
)
.unwrap()
.unwrap();

// we get back the data for epoch 2
assert_eq!(epoch3_, epoch2);

// if we try to get the epoch data for a slot in epoch 4 in the fork
// where we skipped epoch 2, we should get the epoch data for epoch 4
// that was announced at the beginning of epoch 3
let epoch_data = epoch_changes
.shared_data()
.epoch_data_for_child_of(
descendent_query(&*client),
&epoch3_block,
epoch_length + 2,
(epoch_length * 4 + 1).into(),
|slot| Epoch::genesis(&data.link.config, slot),
)
.unwrap()
.unwrap();

assert!(epoch_data != epoch3_);

// if we try to get the epoch data for a slot in epoch 4 in the fork
// where we didn't skip epoch 2, we should get back the data for epoch 3,
// that was announced when epoch 2 started in that fork
let epoch_data = epoch_changes
.shared_data()
.epoch_data_for_child_of(
descendent_query(&*client),
&epoch2_block,
epoch_length + 2,
(epoch_length * 4 + 1).into(),
|slot| Epoch::genesis(&data.link.config, slot),
)
.unwrap()
.unwrap();

assert!(epoch_data != epoch3_);

let epoch3 = epoch_changes
.shared_data()
.epoch(&EpochIdentifier {
position: EpochIdentifierPosition::Regular,
hash: epoch2_block,
number: epoch_length + 2,
})
.unwrap()
.clone();

assert_eq!(epoch_data, epoch3);
}
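
To see how the same matching rule yields different answers per fork, here is a compact model of the two branches built above (same assumptions as the earlier sketch: flat per-fork lists instead of the real fork tree, plain `u64` slots, epoch length 6):

```rust
/// Same simplified resolution rule as in the earlier sketch: the latest
/// announced epoch whose start slot does not exceed the queried slot.
fn epoch_for_slot(announced: &[(u64, u64)], slot: u64) -> Option<u64> {
    announced
        .iter()
        .filter(|&&(start_slot, _)| start_slot <= slot)
        .max_by_key(|&&(start_slot, _)| start_slot)
        .map(|&(_, epoch_index)| epoch_index)
}

fn main() {
    // Fork where epoch 2 is *not* skipped: the block at slot 13 announces
    // epoch 3 starting at slot 19.
    let no_skip_fork = [(1, 0), (7, 1), (13, 2), (19, 3)];
    // Fork where epoch 2 *is* skipped: the block at slot 19 re-uses epoch 2's
    // data (start slot still 13) and announces epoch 4 starting at slot 25.
    let skip_fork = [(1, 0), (7, 1), (13, 2), (25, 4)];

    // The same slot in epoch 4 resolves to different epoch data per fork,
    // matching the test's assertions that neither equals the epoch 2 data.
    assert_eq!(epoch_for_slot(&no_skip_fork, 25), Some(3));
    assert_eq!(epoch_for_slot(&skip_fork, 25), Some(4));
}
```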