test: enable simple resharding v3 test (near#12191)
Make minimal changes to the code to allow checking that the number of shards
in the new epoch has increased.

This code can be reused to test each separate component for resharding
v3:
* non-contiguous shard ids
* state sync
* memtries resharding
* ...

To make the test pass, nodes must track all shards for now, because state
sync is not implemented yet; every node must therefore believe it already has
enough state to skip state sync.

Note that this does not mean resharding already works. The state is not
properly constructed yet either, so transaction processing will either be
incorrect or crash the node.
Longarithm authored Oct 8, 2024
1 parent e129b31 commit d0a33c3
Showing 4 changed files with 64 additions and 22 deletions.
8 changes: 7 additions & 1 deletion chain/chain/src/chain.rs
@@ -2347,7 +2347,13 @@ impl Chain {
     ) -> bool {
         let result = epoch_manager.will_shard_layout_change(parent_hash);
         let will_shard_layout_change = match result {
-            Ok(will_shard_layout_change) => will_shard_layout_change,
+            Ok(_will_shard_layout_change) => {
+                // TODO(#11881): before state sync is fixed, we don't catch up
+                // split shards. Assume that all needed shards are tracked
+                // already.
+                // will_shard_layout_change,
+                false
+            }
             Err(err) => {
                 // TODO(resharding) This is a problem, if this happens the node
                 // will not perform resharding and fall behind the network.
9 changes: 9 additions & 0 deletions core/primitives/src/epoch_manager.rs
@@ -55,6 +55,15 @@ pub struct EpochConfig {
     pub validator_selection_config: ValidatorSelectionConfig,
 }

+impl EpochConfig {
+    /// Total number of validator seats in the epoch since protocol version 69.
+    pub fn num_validators(&self) -> NumSeats {
+        self.num_block_producer_seats
+            .max(self.validator_selection_config.num_chunk_producer_seats)
+            .max(self.validator_selection_config.num_chunk_validator_seats)
+    }
+}
+
 #[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 pub struct ShardConfig {
     pub num_block_producer_seats_per_shard: Vec<NumSeats>,
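A worked example of the new helper, with made-up seat counts (illustrative only, not taken from this commit):

// num_validators() is just the maximum of the three seat counts, so with
// 4 block producer seats, 8 chunk producer seats and 6 chunk validator seats
// the epoch has 8 validator seats in total.
let num_validators: u64 = 4u64.max(8).max(6);
assert_eq!(num_validators, 8);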
20 changes: 13 additions & 7 deletions integration-tests/src/test_loop/builder.rs
@@ -72,6 +72,8 @@ pub(crate) struct TestLoopBuilder {
     config_modifier: Option<Box<dyn Fn(&mut ClientConfig, usize)>>,
     /// Whether to do the warmup or not. See `skip_warmup` for more details.
     warmup: bool,
+    /// Whether all nodes must track all shards.
+    track_all_shards: bool,
 }

 impl TestLoopBuilder {
@@ -91,6 +93,7 @@ impl TestLoopBuilder {
             runtime_config_store: None,
             config_modifier: None,
             warmup: true,
+            track_all_shards: false,
         }
     }

@@ -170,6 +173,11 @@ impl TestLoopBuilder {
         self
     }

+    pub fn track_all_shards(mut self) -> Self {
+        self.track_all_shards = true;
+        self
+    }
+
     /// Overrides the tempdir (which contains state dump, etc.) instead
     /// of creating a new one.
     pub fn test_loop_data_dir(mut self, dir: TempDir) -> Self {
@@ -270,13 +278,11 @@ impl TestLoopBuilder {
             // Configure tracked shards.
             // * single shard tracking for validators
             // * all shard tracking for non-validators (RPCs and archival)
-            let epoch_config = epoch_config_store.get_config(genesis.config.protocol_version);
-            let num_block_producer = epoch_config.num_block_producer_seats;
-            let num_chunk_producer = epoch_config.validator_selection_config.num_chunk_producer_seats;
-            let num_chunk_validator = epoch_config.validator_selection_config.num_chunk_validator_seats;
-            let validator_num =
-                num_block_producer.max(num_chunk_producer).max(num_chunk_validator) as usize;
-            if idx < validator_num {
+            let is_validator = {
+                let epoch_config = epoch_config_store.get_config(genesis.config.protocol_version);
+                idx < epoch_config.num_validators() as usize
+            };
+            if is_validator && !self.track_all_shards {
                 client_config.tracked_shards = Vec::new();
             } else {
                 client_config.tracked_shards = vec![666];
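A sketch of the outcome of the branch above, per client index idx (the "non-empty list means track all shards" reading follows the comment in the hunk; 666 is just a sentinel value):

// is_validator && !track_all_shards  =>  tracked_shards = Vec::new()  // single shard tracking
// otherwise                          =>  tracked_shards = vec![666]   // track all shards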
49 changes: 35 additions & 14 deletions integration-tests/src/test_loop/tests/resharding_v3.rs
@@ -15,10 +15,17 @@ use crate::test_loop::env::TestLoopEnv;
 use crate::test_loop::utils::ONE_NEAR;

 /// Stub for checking Resharding V3.
-/// After uncommenting panics with
-/// StorageInconsistentState("Failed to find root node ... in memtrie")
+/// TODO(#11881): add the following scenarios:
+/// - Shard ids should not be contiguous. For now we reuse existing shard id
+///   which is incorrect!!!
+/// - Nodes must not track all shards. State sync must succeed.
+/// - Set up chunk validator-only nodes. State witness must pass validation.
+/// - Consistent tx load. All txs must succeed.
+/// - Delayed receipts, congestion control computation.
+/// - Cross-shard receipts of all kinds, crossing resharding boundary.
+/// - Shard layout v2 -> v2 transition.
+/// - Shard layout can be taken from mainnet.
 #[test]
-#[ignore]
 fn test_resharding_v3() {
     if !ProtocolFeature::SimpleNightshadeV4.enabled(PROTOCOL_VERSION) {
         return;
@@ -28,12 +35,13 @@ fn test_resharding_v3() {
     let builder = TestLoopBuilder::new();

     let initial_balance = 1_000_000 * ONE_NEAR;
-    let epoch_length = 10;
+    let epoch_length = 6;
     let accounts =
         (0..8).map(|i| format!("account{}", i).parse().unwrap()).collect::<Vec<AccountId>>();
-    let clients = accounts.iter().cloned().collect_vec();
-    let block_and_chunk_producers = (0..8).map(|idx| accounts[idx].as_str()).collect_vec();
-    // TODO: set up chunk validator-only nodes.
+    // #12195 prevents number of BPs bigger than `epoch_length`.
+    let clients = vec![accounts[0].clone(), accounts[3].clone(), accounts[6].clone()];
+    let block_and_chunk_producers =
+        clients.iter().map(|account: &AccountId| account.as_str()).collect_vec();

     // Prepare shard split configuration.
     let base_epoch_config_store = EpochConfigStore::for_chain_id("mainnet").unwrap();
@@ -42,6 +50,12 @@
         base_epoch_config_store.get_config(base_protocol_version).as_ref().clone();
     base_epoch_config.validator_selection_config.shuffle_shard_assignment_for_chunk_producers =
         false;
+    // TODO(#11881): enable kickouts when blocks and chunks are produced
+    // properly.
+    base_epoch_config.block_producer_kickout_threshold = 0;
+    base_epoch_config.chunk_producer_kickout_threshold = 0;
+    base_epoch_config.chunk_validator_only_kickout_threshold = 0;
+    base_epoch_config.shard_layout = ShardLayout::v1(vec!["account3".parse().unwrap()], None, 3);
     let base_shard_layout = base_epoch_config.shard_layout.clone();
     let mut epoch_config = base_epoch_config.clone();
     let mut boundary_accounts = base_shard_layout.boundary_accounts().clone();
@@ -50,9 +64,12 @@
     let last_shard_id = shard_ids.pop().unwrap();
     let mut shards_split_map: BTreeMap<ShardId, Vec<ShardId>> =
         shard_ids.iter().map(|shard_id| (*shard_id, vec![*shard_id])).collect();
-    shard_ids.extend([max_shard_id + 1, max_shard_id + 2]);
-    shards_split_map.insert(last_shard_id, vec![max_shard_id + 1, max_shard_id + 2]);
-    boundary_accounts.push(AccountId::try_from("x.near".to_string()).unwrap());
+    // TODO(#11881): keep this way until non-contiguous shard ids are supported.
+    // let new_shards = vec![max_shard_id + 1, max_shard_id + 2];
+    let new_shards = vec![max_shard_id, max_shard_id + 1];
+    shard_ids.extend(new_shards.clone());
+    shards_split_map.insert(last_shard_id, new_shards);
+    boundary_accounts.push(AccountId::try_from("xyz.near".to_string()).unwrap());
     epoch_config.shard_layout =
         ShardLayout::v2(boundary_accounts, shard_ids, Some(shards_split_map));
     let expected_num_shards = epoch_config.shard_layout.shard_ids().count();
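Concretely, with the base layout pinned above to ShardLayout::v1(vec!["account3".parse().unwrap()], None, 3), the construction works out roughly as follows (a sketch; it assumes the base v1 layout has exactly two shards, 0 and 1):

// Base layout: boundary ["account3"], shard ids [0, 1]; last_shard_id = 1.
// The contiguity workaround reuses the split shard's id, so:
//   new_shards        = [1, 2]             // instead of [2, 3]
//   shard_ids         = [0, 1, 2]
//   shards_split_map  = {0: [0], 1: [1, 2]}
//   boundary_accounts = ["account3", "xyz.near"]
// expected_num_shards is therefore 3, which the success condition below waits for.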
@@ -73,8 +90,12 @@
     }
     let (genesis, _) = genesis_builder.build();

-    let TestLoopEnv { mut test_loop, datas: node_datas, tempdir } =
-        builder.genesis(genesis).epoch_config_store(epoch_config_store).clients(clients).build();
+    let TestLoopEnv { mut test_loop, datas: node_datas, tempdir } = builder
+        .genesis(genesis)
+        .epoch_config_store(epoch_config_store)
+        .clients(clients)
+        .track_all_shards()
+        .build();

     let client_handle = node_datas[0].client_sender.actor_handle();
     let success_condition = |test_loop_data: &mut TestLoopData| -> bool {
@@ -89,8 +110,8 @@

     test_loop.run_until(
         success_condition,
-        // Timeout at producing 5 epochs, approximately.
-        Duration::seconds((5 * epoch_length) as i64),
+        // Give enough time to produce ~6 epochs.
+        Duration::seconds((6 * epoch_length) as i64),
     );

     TestLoopEnv { test_loop, datas: node_datas, tempdir }
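The body of success_condition is collapsed in the hunk above. A minimal sketch of the kind of check it performs is below; the client APIs used here (TestLoopData::get, chain.head(), epoch_manager.get_shard_layout) are assumptions for illustration, not taken from this commit:

// Sketch only: succeed once the current epoch's shard layout has the
// expected, increased number of shards.
let success_condition = |test_loop_data: &mut TestLoopData| -> bool {
    let client = &test_loop_data.get(&client_handle).client;
    let tip = client.chain.head().unwrap();
    let shard_layout = client.epoch_manager.get_shard_layout(&tip.epoch_id).unwrap();
    shard_layout.shard_ids().count() == expected_num_shards
};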
