test: enable simple resharding v3 test #12191
Changes from all commits
```diff
@@ -15,10 +15,17 @@ use crate::test_loop::env::TestLoopEnv;
 use crate::test_loop::utils::ONE_NEAR;
 
 /// Stub for checking Resharding V3.
-/// After uncommenting panics with
-/// StorageInconsistentState("Failed to find root node ... in memtrie")
+/// TODO(#11881): add the following scenarios:
+/// - Shard ids should not be contiguous. For now we reuse existing shard id
+/// which is incorrect!!!
+/// - Nodes must not track all shards. State sync must succeed.
+/// - Set up chunk validator-only nodes. State witness must pass validation.
+/// - Consistent tx load. All txs must succeed.
+/// - Delayed receipts, congestion control computation.
+/// - Cross-shard receipts of all kinds, crossing resharding boundary.
+/// - Shard layout v2 -> v2 transition.
+/// - Shard layout can be taken from mainnet.
 #[test]
-#[ignore]
 fn test_resharding_v3() {
     if !ProtocolFeature::SimpleNightshadeV4.enabled(PROTOCOL_VERSION) {
         return;
```
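The `ProtocolFeature` guard above makes the test a silent no-op on protocol versions that predate the feature. A self-contained sketch of that gating pattern; the `Feature` type, the `introduced_in` field, and the version numbers are illustrative stand-ins, not nearcore's actual API:

```rust
// Stand-in for nearcore's ProtocolFeature: a feature knows the first
// protocol version in which it is enabled.
const PROTOCOL_VERSION: u32 = 100; // hypothetical current version

struct Feature {
    introduced_in: u32,
}

impl Feature {
    fn enabled(&self, protocol_version: u32) -> bool {
        protocol_version >= self.introduced_in
    }
}

fn main() {
    let simple_nightshade_v4 = Feature { introduced_in: 101 }; // hypothetical: not yet stabilized
    if !simple_nightshade_v4.enabled(PROTOCOL_VERSION) {
        // Skip the body entirely, exactly like the early return in the test.
        return;
    }
    // ... test body would run here ...
}
```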
```diff
@@ -28,12 +35,13 @@ fn test_resharding_v3() {
     let builder = TestLoopBuilder::new();
 
     let initial_balance = 1_000_000 * ONE_NEAR;
-    let epoch_length = 10;
+    let epoch_length = 6;
     let accounts =
         (0..8).map(|i| format!("account{}", i).parse().unwrap()).collect::<Vec<AccountId>>();
-    let clients = accounts.iter().cloned().collect_vec();
-    let block_and_chunk_producers = (0..8).map(|idx| accounts[idx].as_str()).collect_vec();
+    // TODO: set up chunk validator-only nodes.
+    // #12195 prevents number of BPs bigger than `epoch_length`.
+    let clients = vec![accounts[0].clone(), accounts[3].clone(), accounts[6].clone()];
```
Review comment: Why do you only pick a few accounts here? Can you add a comment?
```diff
+    let block_and_chunk_producers =
+        clients.iter().map(|account: &AccountId| account.as_str()).collect_vec();
 
     // Prepare shard split configuration.
     let base_epoch_config_store = EpochConfigStore::for_chain_id("mainnet").unwrap();
```
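For concreteness, the account setup above evaluates as in this standalone sketch (plain `String`s instead of `AccountId`); per the in-diff comment, #12195 forbids more block producers than `epoch_length` (6 here), so only 3 of the 8 accounts run clients:

```rust
fn main() {
    // Same generation as in the test, minus the AccountId parsing.
    let accounts: Vec<String> = (0..8).map(|i| format!("account{}", i)).collect();
    // 3 clients satisfy the cap (3 <= 6), whereas all 8 accounts would not (8 > 6).
    let clients = vec![accounts[0].clone(), accounts[3].clone(), accounts[6].clone()];
    assert_eq!(clients, ["account0", "account3", "account6"]);
    // block_and_chunk_producers is then derived from clients, so producers
    // and clients are the same three accounts.
}
```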
```diff
@@ -42,6 +50,12 @@ fn test_resharding_v3() {
         base_epoch_config_store.get_config(base_protocol_version).as_ref().clone();
     base_epoch_config.validator_selection_config.shuffle_shard_assignment_for_chunk_producers =
         false;
+    // TODO(#11881): enable kickouts when blocks and chunks are produced
+    // properly.
+    base_epoch_config.block_producer_kickout_threshold = 0;
+    base_epoch_config.chunk_producer_kickout_threshold = 0;
+    base_epoch_config.chunk_validator_only_kickout_threshold = 0;
```
Review comment on lines +55 to +57: Why no kickouts? Is it temporary until all is implemented? If so, can you add a todo?
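On the kickout question: these thresholds are percentages of expected work below which a validator is kicked out at the epoch boundary, so zeroing them means no validator can ever fall below the bar. A simplified model of that check, not nearcore's actual implementation:

```rust
// Simplified model of a production-based kickout decision.
fn should_kick_out(produced: u64, expected: u64, threshold_pct: u64) -> bool {
    // Kick out when produced/expected falls below threshold_pct percent.
    produced * 100 < threshold_pct * expected
}

fn main() {
    // At a 90% threshold, producing 5 of 10 expected chunks means kickout.
    assert!(should_kick_out(5, 10, 90));
    // At a 0% threshold nothing triggers a kickout, even producing nothing.
    assert!(!should_kick_out(0, 10, 0));
}
```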
```diff
+    base_epoch_config.shard_layout = ShardLayout::v1(vec!["account3".parse().unwrap()], None, 3);
```
Review comment: We should test also the case where we go from …
```diff
     let base_shard_layout = base_epoch_config.shard_layout.clone();
     let mut epoch_config = base_epoch_config.clone();
     let mut boundary_accounts = base_shard_layout.boundary_accounts().clone();
```
```diff
@@ -50,9 +64,12 @@ fn test_resharding_v3() {
     let last_shard_id = shard_ids.pop().unwrap();
     let mut shards_split_map: BTreeMap<ShardId, Vec<ShardId>> =
         shard_ids.iter().map(|shard_id| (*shard_id, vec![*shard_id])).collect();
-    shard_ids.extend([max_shard_id + 1, max_shard_id + 2]);
-    shards_split_map.insert(last_shard_id, vec![max_shard_id + 1, max_shard_id + 2]);
-    boundary_accounts.push(AccountId::try_from("x.near".to_string()).unwrap());
+    // TODO(#11881): keep this way until non-contiguous shard ids are supported.
+    // let new_shards = vec![max_shard_id + 1, max_shard_id + 2];
```
Review comment: nit: add todo?
```diff
+    let new_shards = vec![max_shard_id, max_shard_id + 1];
+    shard_ids.extend(new_shards.clone());
+    shards_split_map.insert(last_shard_id, new_shards);
+    boundary_accounts.push(AccountId::try_from("xyz.near".to_string()).unwrap());
     epoch_config.shard_layout =
         ShardLayout::v2(boundary_accounts, shard_ids, Some(shards_split_map));
     let expected_num_shards = epoch_config.shard_layout.shard_ids().count();
```
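A worked example of the split: the v1 base layout above has one boundary account ("account3"), hence two shards with ids 0 and 1. The standalone sketch below mirrors the diff's logic with plain `u64` in place of `ShardId`; shard 1 splits into children 1 and 2, the first child reusing the parent's id until non-contiguous ids are supported:

```rust
use std::collections::BTreeMap;

fn main() {
    // Two shards from the v1 layout with a single boundary account.
    let mut shard_ids: Vec<u64> = vec![0, 1];
    let max_shard_id = *shard_ids.iter().max().unwrap();
    let last_shard_id = shard_ids.pop().unwrap();
    // Unsplit shards map to themselves.
    let mut shards_split_map: BTreeMap<u64, Vec<u64>> =
        shard_ids.iter().map(|id| (*id, vec![*id])).collect();
    // Contiguity workaround: children are [max, max + 1], not [max + 1, max + 2].
    let new_shards = vec![max_shard_id, max_shard_id + 1];
    shard_ids.extend(new_shards.clone());
    shards_split_map.insert(last_shard_id, new_shards);
    // Resulting layout: shard 0 carried over, shard 1 split into 1 and 2,
    // so expected_num_shards == 3.
    assert_eq!(shard_ids, vec![0, 1, 2]);
    assert_eq!(shards_split_map[&0], vec![0]);
    assert_eq!(shards_split_map[&1], vec![1, 2]);
}
```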
```diff
@@ -73,8 +90,12 @@ fn test_resharding_v3() {
     }
     let (genesis, _) = genesis_builder.build();
 
-    let TestLoopEnv { mut test_loop, datas: node_datas, tempdir } =
-        builder.genesis(genesis).epoch_config_store(epoch_config_store).clients(clients).build();
+    let TestLoopEnv { mut test_loop, datas: node_datas, tempdir } = builder
+        .genesis(genesis)
+        .epoch_config_store(epoch_config_store)
+        .clients(clients)
+        .track_all_shards()
+        .build();
 
     let client_handle = node_datas[0].client_sender.actor_handle();
     let success_condition = |test_loop_data: &mut TestLoopData| -> bool {
```
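`run_until` drives the simulated chain until the success condition returns true or the deadline expires. A self-contained sketch of that poll-until-timeout pattern, with stand-in code rather than TestLoop's real API:

```rust
use std::time::Duration;

// Poll `condition` once per simulated step; panic if the deadline passes first.
fn run_until(mut condition: impl FnMut() -> bool, timeout: Duration, step: Duration) {
    let mut elapsed = Duration::ZERO;
    while !condition() {
        elapsed += step;
        assert!(elapsed < timeout, "success condition not met before timeout");
    }
}

fn main() {
    let mut height = 0u64;
    // Toy condition: "chain reached height 10", standing in for "the new
    // shard layout with expected_num_shards became active".
    run_until(
        || {
            height += 1;
            height >= 10
        },
        Duration::from_secs(60),
        Duration::from_secs(1),
    );
}
```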
```diff
@@ -89,8 +110,8 @@ fn test_resharding_v3() {
 
     test_loop.run_until(
         success_condition,
-        // Timeout at producing 5 epochs, approximately.
-        Duration::seconds((5 * epoch_length) as i64),
+        // Give enough time to produce ~6 epochs.
+        Duration::seconds((6 * epoch_length) as i64),
     );
 
     TestLoopEnv { test_loop, datas: node_datas, tempdir }
```
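On the timeout arithmetic: the comments equate simulated seconds with blocks (roughly one block per second), so with the new `epoch_length = 6` the bound is `Duration::seconds(36)`, about six 6-block epochs; the old bound was 5 * 10 = 50 seconds, about five 10-block epochs.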
Review comment: nit: could we add a todo here to enable this later?