From 2394e0cfa86fe864de7b01f974bd2fdd0eb0a1af Mon Sep 17 00:00:00 2001
From: robin-near <111538878+robin-near@users.noreply.github.com>
Date: Tue, 13 Aug 2024 17:15:00 -0700
Subject: [PATCH] [Epoch Sync] Remove previous new_epoch_sync code. (#11929)

The new design has little overlap with the old, so this PR removes the old
code to avoid confusion with the new implementation. It also gets rid of the
insidious new_epoch_sync feature.

This is safe because most of the removed code was guarded by the
new_epoch_sync feature, so almost none of it was actually compiled. The only
parts that were compiled:
- Client had an EpochSync struct, but without the feature it didn't do
  anything.
- The config had an epoch_sync_enabled flag, but without the feature that
  flag isn't used.
- Some integration tests supplied epoch_sync_enabled = true; I either
  removed the test or set the flag to false.
---
 Cargo.lock | 16 -
 Cargo.toml | 1 -
 chain/chain-primitives/Cargo.toml | 2 -
 chain/chain-primitives/src/error.rs | 13 -
 chain/chain/Cargo.toml | 6 -
 chain/chain/src/chain.rs | 110 ----
 chain/chain/src/chain_update.rs | 205 --------
 chain/chain/src/garbage_collection.rs | 2 -
 chain/chain/src/store/mod.rs | 11 -
 chain/chain/src/test_utils/kv_runtime.rs | 12 -
 chain/client/Cargo.toml | 1 -
 chain/client/src/client.rs | 27 -
 chain/client/src/info.rs | 4 +-
 chain/client/src/sync/epoch.rs | 82 ---
 chain/client/src/sync/mod.rs | 1 -
 chain/client/src/test_utils/setup.rs | 30 +-
 chain/client/src/tests/bug_repros.rs | 3 -
 chain/client/src/tests/catching_up.rs | 7 -
 chain/client/src/tests/consensus.rs | 2 -
 chain/client/src/tests/cross_shard_tx.rs | 2 -
 chain/client/src/tests/query_client.rs | 1 -
 chain/epoch-manager/Cargo.toml | 1 -
 chain/epoch-manager/src/adapter.rs | 38 --
 chain/epoch-manager/src/lib.rs | 58 ---
 chain/jsonrpc/jsonrpc-tests/src/lib.rs | 8 +-
 .../jsonrpc-tests/tests/rpc_transactions.rs | 2 +-
 core/chain-configs/src/client_config.rs | 8 -
 core/chain-configs/src/lib.rs | 27 +-
 core/primitives/Cargo.toml | 1 -
 core/primitives/src/epoch_manager.rs | 162 ------
 core/primitives/src/epoch_sync.rs | 34 --
 core/primitives/src/errors.rs | 25 -
 core/primitives/src/lib.rs | 1 -
 core/store/Cargo.toml | 1 -
 core/store/src/columns.rs | 10 -
 integration-tests/Cargo.toml | 1 -
 integration-tests/src/test_loop/builder.rs | 3 +-
 .../tests/simple_test_loop_example.rs | 1 -
 .../src/tests/client/chunks_management.rs | 2 -
 .../src/tests/client/epoch_sync.rs | 491 ------
 integration-tests/src/tests/client/mod.rs | 2 -
 .../src/tests/client/process_blocks.rs | 3 -
 .../src/tests/client/sync_state_nodes.rs | 13 -
 integration-tests/src/tests/mod.rs | 2 -
 .../src/tests/nearcore/node_cluster.rs | 1 -
 .../src/tests/nearcore/stake_nodes.rs | 1 -
 .../src/tests/nearcore/sync_nodes.rs | 2 -
 integration-tests/src/tests/nearcore_utils.rs | 2 -
 integration-tests/src/tests/network/runner.rs | 3 +-
 nearcore/Cargo.toml | 3 -
 nearcore/src/config.rs | 39 +-
 neard/Cargo.toml | 2 -
 neard/src/cli.rs | 10 -
 tools/epoch-sync/Cargo.toml | 44 --
 tools/epoch-sync/src/cli.rs | 301 -----
 tools/epoch-sync/src/lib.rs | 4 -
 56 files changed, 47 insertions(+), 1797 deletions(-)
 delete mode 100644 chain/client/src/sync/epoch.rs
 delete mode 100644 core/primitives/src/epoch_sync.rs
 delete mode 100644 integration-tests/src/tests/client/epoch_sync.rs
 delete mode 100644 tools/epoch-sync/Cargo.toml
 delete mode 100644 tools/epoch-sync/src/cli.rs
 delete mode 100644 tools/epoch-sync/src/lib.rs

diff --git a/Cargo.lock b/Cargo.lock index 4d53d19ea26..b5034459587 100644 ---
a/Cargo.lock +++ b/Cargo.lock @@ -4304,21 +4304,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "near-epoch-sync-tool" -version = "0.0.0" -dependencies = [ - "anyhow", - "clap", - "near-chain", - "near-chain-configs", - "near-epoch-manager", - "near-primitives", - "near-store", - "nearcore", - "tracing", -] - [[package]] name = "near-flat-storage" version = "0.0.0" @@ -5374,7 +5359,6 @@ dependencies = [ "near-crypto", "near-database-tool", "near-dyn-configs", - "near-epoch-sync-tool", "near-flat-storage", "near-fork-network", "near-jsonrpc-primitives", diff --git a/Cargo.toml b/Cargo.toml index b89839423c3..ce2126d1e68 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -239,7 +239,6 @@ nearcore = { path = "nearcore" } near-crypto = { path = "core/crypto", default-features = false } near-dyn-configs = { path = "core/dyn-configs" } near-epoch-manager = { path = "chain/epoch-manager" } -near-epoch-sync-tool = { path = "tools/epoch-sync" } near-flat-storage = { path = "tools/flat-storage" } near-fork-network = { path = "tools/fork-network" } near-fmt = { path = "utils/fmt" } diff --git a/chain/chain-primitives/Cargo.toml b/chain/chain-primitives/Cargo.toml index d37b76b74de..daa533aa84a 100644 --- a/chain/chain-primitives/Cargo.toml +++ b/chain/chain-primitives/Cargo.toml @@ -20,5 +20,3 @@ near-time.workspace = true near-primitives.workspace = true near-crypto.workspace = true -[features] -new_epoch_sync = [] diff --git a/chain/chain-primitives/src/error.rs b/chain/chain-primitives/src/error.rs index 6b25ec3cb32..53b9f5012db 100644 --- a/chain/chain-primitives/src/error.rs +++ b/chain/chain-primitives/src/error.rs @@ -452,16 +452,3 @@ pub enum BlockKnownError { #[error("already known in invalid blocks")] KnownAsInvalid, } - -#[cfg(feature = "new_epoch_sync")] -pub mod epoch_sync { - #[derive(thiserror::Error, std::fmt::Debug)] - pub enum EpochSyncInfoError { - #[error(transparent)] - EpochSyncInfoErr(#[from] near_primitives::errors::epoch_sync::EpochSyncInfoError), - #[error(transparent)] - IOErr(#[from] std::io::Error), - #[error(transparent)] - ChainErr(#[from] crate::Error), - } -} diff --git a/chain/chain/Cargo.toml b/chain/chain/Cargo.toml index d6237886121..1ee86aa966f 100644 --- a/chain/chain/Cargo.toml +++ b/chain/chain/Cargo.toml @@ -73,12 +73,6 @@ test_features = [ ] shadow_chunk_validation = [] no_cache = ["near-store/no_cache"] -new_epoch_sync = [ - "near-store/new_epoch_sync", - "near-primitives/new_epoch_sync", - "near-epoch-manager/new_epoch_sync", - "near-chain-primitives/new_epoch_sync", -] protocol_feature_reject_blocks_with_outdated_protocol_version = [ "near-primitives/protocol_feature_reject_blocks_with_outdated_protocol_version", diff --git a/chain/chain/src/chain.rs b/chain/chain/src/chain.rs index 078b5826762..a95053fafad 100644 --- a/chain/chain/src/chain.rs +++ b/chain/chain/src/chain.rs @@ -43,8 +43,6 @@ use near_async::time::{Clock, Duration, Instant}; use near_chain_configs::{ MutableConfigValue, MutableValidatorSigner, ReshardingConfig, ReshardingHandle, }; -#[cfg(feature = "new_epoch_sync")] -use near_chain_primitives::error::epoch_sync::EpochSyncInfoError; use near_chain_primitives::error::{BlockKnownError, Error, LogTransientStorageError}; use near_epoch_manager::shard_tracker::ShardTracker; use near_epoch_manager::types::BlockHeaderInfo; @@ -58,10 +56,6 @@ use near_primitives::challenge::{ }; use near_primitives::checked_feature; use near_primitives::congestion_info::CongestionInfo; -#[cfg(feature = "new_epoch_sync")] -use 
near_primitives::epoch_manager::epoch_sync::EpochSyncInfo; -#[cfg(feature = "new_epoch_sync")] -use near_primitives::errors::epoch_sync::EpochSyncHashType; use near_primitives::errors::EpochError; use near_primitives::hash::{hash, CryptoHash}; use near_primitives::merkle::{ @@ -89,8 +83,6 @@ use near_primitives::types::{ NumBlocks, ShardId, StateRoot, }; use near_primitives::unwrap_or_return; -#[cfg(feature = "new_epoch_sync")] -use near_primitives::utils::index_to_bytes; use near_primitives::utils::MaybeValidated; use near_primitives::version::{ProtocolFeature, ProtocolVersion, PROTOCOL_VERSION}; use near_primitives::views::{ @@ -1577,14 +1569,6 @@ impl Chain { .add_validator_proposals(BlockHeaderInfo::new(header, last_finalized_height))?; chain_store_update.merge(epoch_manager_update); chain_store_update.commit()?; - - #[cfg(feature = "new_epoch_sync")] - { - // At this point BlockInfo for this header should be in DB and in `epoch_manager`s cache because of `add_validator_proposals` call. - let mut chain_update = self.chain_update(); - chain_update.save_epoch_sync_info_if_finalised(header)?; - chain_update.commit()?; - } } let mut chain_update = self.chain_update(); @@ -4615,100 +4599,6 @@ impl Chain { } } -/// Epoch sync specific functions. -#[cfg(feature = "new_epoch_sync")] -impl Chain { - /// TODO(posvyatokum): validate `epoch_sync_info` before `store_update` commit. - pub fn validate_and_record_epoch_sync_info( - &mut self, - epoch_sync_info: &EpochSyncInfo, - ) -> Result<(), EpochSyncInfoError> { - let store = self.chain_store().store().clone(); - let epoch_manager = self.epoch_manager.clone(); - let mut chain_store_update = self.chain_store.store_update(); - let mut store_update = store.store_update(); - - let epoch_id = epoch_sync_info.get_epoch_id()?; - // save EpochSyncInfo - - store_update.set_ser(DBCol::EpochSyncInfo, epoch_id.as_ref(), epoch_sync_info)?; - - // save EpochInfo's - - store_update.set_ser(DBCol::EpochInfo, epoch_id.as_ref(), &epoch_sync_info.epoch_info)?; - store_update.set_ser( - DBCol::EpochInfo, - epoch_sync_info.get_next_epoch_id()?.as_ref(), - &epoch_sync_info.next_epoch_info, - )?; - store_update.set_ser( - DBCol::EpochInfo, - epoch_sync_info.get_next_next_epoch_id()?.as_ref(), - &epoch_sync_info.next_next_epoch_info, - )?; - - // construct and save all new BlockMerkleTree's - - let mut cur_block_merkle_tree = (*chain_store_update - .get_block_merkle_tree(epoch_sync_info.get_epoch_first_header()?.prev_hash())?) - .clone(); - let mut prev_hash = epoch_sync_info.get_epoch_first_header()?.prev_hash(); - for hash in &epoch_sync_info.all_block_hashes { - cur_block_merkle_tree.insert(*prev_hash); - chain_store_update.save_block_merkle_tree(*hash, cur_block_merkle_tree.clone()); - prev_hash = hash; - } - - // save all block data in headers_to_save - - for hash in &epoch_sync_info.headers_to_save { - let header = epoch_sync_info.get_header(*hash, EpochSyncHashType::BlockToSave)?; - // check that block is not known already - if store.exists(DBCol::BlockHeader, hash.as_ref())? 
{ - continue; - } - - store_update.insert_ser(DBCol::BlockHeader, header.hash().as_ref(), header)?; - store_update.set_ser( - DBCol::NextBlockHashes, - header.prev_hash().as_ref(), - header.hash(), - )?; - store_update.set_ser( - DBCol::BlockHeight, - &index_to_bytes(header.height()), - header.hash(), - )?; - store_update.set_ser( - DBCol::BlockOrdinal, - &index_to_bytes(header.block_ordinal()), - &header.hash(), - )?; - - store_update.insert_ser( - DBCol::BlockInfo, - hash.as_ref(), - &epoch_sync_info.get_block_info(hash)?, - )?; - } - - // save header head, final head, update epoch_manager aggregator - chain_store_update - .force_save_header_head(&Tip::from_header(epoch_sync_info.get_epoch_last_header()?))?; - chain_store_update.save_final_head(&Tip::from_header( - epoch_sync_info.get_epoch_last_finalised_header()?, - ))?; - epoch_manager - .force_update_aggregator(epoch_id, epoch_sync_info.get_epoch_last_finalised_hash()?); - - // TODO(posvyatokum): add EpochSyncInfo validation. - - chain_store_update.merge(store_update); - chain_store_update.commit()?; - Ok(()) - } -} - pub fn do_apply_chunks( block_hash: CryptoHash, block_height: BlockHeight, diff --git a/chain/chain/src/chain_update.rs b/chain/chain/src/chain_update.rs index ef0cc981260..10530b35647 100644 --- a/chain/chain/src/chain_update.rs +++ b/chain/chain/src/chain_update.rs @@ -16,10 +16,6 @@ use near_epoch_manager::EpochManagerAdapter; use near_primitives::apply::ApplyChunkReason; use near_primitives::block::{Block, Tip}; use near_primitives::block_header::BlockHeader; -#[cfg(feature = "new_epoch_sync")] -use near_primitives::epoch_block_info::BlockInfo; -#[cfg(feature = "new_epoch_sync")] -use near_primitives::epoch_manager::epoch_sync::EpochSyncInfo; use near_primitives::hash::CryptoHash; use near_primitives::shard_layout::{account_id_to_shard_uid, ShardUId}; use near_primitives::sharding::ShardChunk; @@ -29,8 +25,6 @@ use near_primitives::types::{BlockExtra, BlockHeight, BlockHeightDelta, NumShard use near_primitives::version::ProtocolFeature; use near_primitives::views::LightClientBlockView; use std::collections::HashMap; -#[cfg(feature = "new_epoch_sync")] -use std::collections::HashSet; use std::sync::Arc; use tracing::{debug, info, warn}; @@ -456,12 +450,6 @@ impl<'a> ChainUpdate<'a> { .add_validator_proposals(BlockHeaderInfo::new(block.header(), last_finalized_height))?; self.chain_store_update.merge(epoch_manager_update); - #[cfg(feature = "new_epoch_sync")] - { - // BlockInfo should be already recorded in epoch_manager cache because of `add_validator_proposals` call - self.save_epoch_sync_info_if_finalised(block.header())?; - } - // Add validated block to the db, even if it's not the canonical fork. self.chain_store_update.save_block(block.clone()); self.chain_store_update.inc_block_refcount(prev_hash)?; @@ -856,196 +844,3 @@ impl<'a> ChainUpdate<'a> { Ok(true) } } - -/// Epoch sync specific functions. -#[cfg(feature = "new_epoch_sync")] -impl<'a> ChainUpdate<'a> { - /// This function assumes `BlockInfo` is already retrievable from `epoch_manager`. - /// This can be achieved by calling `add_validator_proposals`. - pub(crate) fn save_epoch_sync_info_if_finalised( - &mut self, - header: &BlockHeader, - ) -> Result<(), Error> { - let block_info = self.epoch_manager.get_block_info(header.hash())?; - let epoch_first_block_hash = block_info.epoch_first_block(); - - if *epoch_first_block_hash == CryptoHash::default() { - // This is the genesis epoch. We don't have any fully finalised epoch yet. 
- return Ok(()); - } - - let epoch_first_block_info = self.epoch_manager.get_block_info(epoch_first_block_hash)?; - let prev_epoch_last_block_hash = epoch_first_block_info.prev_hash(); - - if *prev_epoch_last_block_hash == CryptoHash::default() { - // This is the genesis epoch. We don't have any fully finalised epoch yet. - return Ok(()); - } - let prev_epoch_last_block_info = - self.epoch_manager.get_block_info(prev_epoch_last_block_hash)?; - - if prev_epoch_last_block_info.epoch_id() == epoch_first_block_info.epoch_id() { - // Previous epoch is the genesis epoch. We don't have any fully finalised epoch yet. - return Ok(()); - } - - // Check that last finalised block is after epoch first block. - // So, that it is in the current epoch. - let last_final_block_hash = header.last_final_block(); - if *last_final_block_hash == CryptoHash::default() { - // We didn't finalise any blocks yet. We don't have any fully finalised epoch yet. - return Ok(()); - } - let last_final_block_info = self.epoch_manager.get_block_info(last_final_block_hash)?; - if last_final_block_info.epoch_id() != epoch_first_block_info.epoch_id() { - // Last finalised block is in the previous epoch. - // We didn't finalise header with `epoch_sync_data_hash` for the previous epoch yet. - return Ok(()); - } - if self.chain_store_update.store().exists( - near_store::DBCol::EpochSyncInfo, - prev_epoch_last_block_info.epoch_id().as_ref(), - )? { - // We already wrote `EpochSyncInfo` for this epoch. - // Probably during epoch sync. - return Ok(()); - } - self.save_epoch_sync_info_impl(&prev_epoch_last_block_info, epoch_first_block_hash) - } - - /// If the block is the last one in the epoch - /// construct and record `EpochSyncInfo` to `self.chain_store_update`. - fn save_epoch_sync_info_impl( - &mut self, - last_block_info: &BlockInfo, - next_epoch_first_hash: &CryptoHash, - ) -> Result<(), Error> { - let mut store_update = self.chain_store_update.store().store_update(); - store_update - .set_ser( - near_store::DBCol::EpochSyncInfo, - last_block_info.epoch_id().as_ref(), - &self.create_epoch_sync_info(last_block_info, next_epoch_first_hash, None)?, - ) - .map_err(near_primitives::errors::EpochError::from)?; - self.chain_store_update.merge(store_update); - Ok(()) - } - - /// Create a pair of `BlockHeader`s necessary to create `BlockInfo` for `block_hash`: - /// - header for `block_hash` - /// - header for `last_final_block` of `block_hash` header - fn get_header_pair( - &self, - block_hash: &CryptoHash, - ) -> Result<(BlockHeader, BlockHeader), Error> { - let header = self.chain_store_update.get_block_header(block_hash)?; - // `block_hash` can correspond to genesis block, for which there is no last final block recorded, - // because `last_final_block` for genesis is `CryptoHash::default()` - // Here we return just the same genesis block header as last known block header - // TODO(posvyatokum) process this case carefully in epoch sync validation - // TODO(posvyatokum) process this carefully in saving the parts of epoch sync data - let last_finalised_header = { - if *header.last_final_block() == CryptoHash::default() { - header.clone() - } else { - self.chain_store_update.get_block_header(header.last_final_block())? 
- } - }; - Ok((header, last_finalised_header)) - } - - /// For epoch sync we need to save: - /// - (*) first header of the next epoch (contains `epoch_sync_data_hash` for `EpochInfo` validation) - /// - first header of the epoch - /// - last header of the epoch - /// - prev last header of the epoch - /// - every header on chain from `last_final_block` to the end of the epoch - /// - (*) header of the `last_final_block` for each of previously mentioned headers - /// - /// Because headers may repeat between those points, we use one `HashMap` to store them indexed by hash. - /// - /// Headers not marked with (*) need to be saved on the syncing node. - /// Headers marked with (*) only needed for `EpochSyncInfo` validation. - fn get_epoch_sync_info_headers( - &self, - last_block_info: &BlockInfo, - next_epoch_first_hash: &CryptoHash, - ) -> Result<(HashMap, HashSet), Error> { - let mut headers = HashMap::new(); - let mut headers_to_save = HashSet::new(); - - let mut add_header = |block_hash: &CryptoHash| -> Result<(), Error> { - let (header, last_finalised_header) = self.get_header_pair(block_hash)?; - headers.insert(*header.hash(), header); - headers.insert(*last_finalised_header.hash(), last_finalised_header); - headers_to_save.insert(*block_hash); - Ok(()) - }; - - add_header(next_epoch_first_hash)?; - add_header(last_block_info.epoch_first_block())?; - add_header(last_block_info.hash())?; - add_header(last_block_info.prev_hash())?; - - // If we didn't add `last_final_block_hash` yet, go down the chain until we find it. - if last_block_info.hash() != last_block_info.last_final_block_hash() - && last_block_info.prev_hash() != last_block_info.last_final_block_hash() - { - let mut current_header = - self.chain_store_update.get_block_header(last_block_info.prev_hash())?; - while current_header.hash() != last_block_info.last_final_block_hash() { - // This only should happen if BlockInfo data is incorrect. - // Without this assert same BlockInfo will cause infinite loop instead of crash with a message. - assert!( - current_header.height() > last_block_info.last_finalized_height(), - "Reached block at height {:?} with hash {:?} from {:?}", - current_header.height(), - current_header.hash(), - last_block_info - ); - - // current_header was already added, as we start from current_header = prev_header. - current_header = - self.chain_store_update.get_block_header(current_header.prev_hash())?; - add_header(current_header.hash())?; - } - } - - // We don't need to save `next_epoch_first_hash` during `EpochSyncInfo` processing. - // It is only needed for validation. - headers_to_save.remove(next_epoch_first_hash); - - Ok((headers, headers_to_save)) - } - - /// Data that is necessary to prove Epoch in new Epoch Sync. 
- pub fn create_epoch_sync_info( - &self, - last_block_info: &BlockInfo, - next_epoch_first_hash: &CryptoHash, - hash_to_prev_hash: Option<&HashMap>, - ) -> Result { - let mut all_block_hashes = - self.epoch_manager.get_all_epoch_hashes(last_block_info, hash_to_prev_hash)?; - all_block_hashes.reverse(); - - let (headers, headers_to_save) = - self.get_epoch_sync_info_headers(last_block_info, next_epoch_first_hash)?; - - let epoch_id = last_block_info.epoch_id(); - let next_epoch_id = self.epoch_manager.get_next_epoch_id(last_block_info.hash())?; - let next_next_epoch_id = near_primitives::types::EpochId(*last_block_info.hash()); - - Ok(EpochSyncInfo { - all_block_hashes, - headers, - headers_to_save, - next_epoch_first_hash: *next_epoch_first_hash, - epoch_info: (*self.epoch_manager.get_epoch_info(epoch_id)?).clone(), - next_epoch_info: (*self.epoch_manager.get_epoch_info(&next_epoch_id)?).clone(), - next_next_epoch_info: (*self.epoch_manager.get_epoch_info(&next_next_epoch_id)?) - .clone(), - }) - } -} diff --git a/chain/chain/src/garbage_collection.rs b/chain/chain/src/garbage_collection.rs index acefa3d7f52..e7a6ac3d90f 100644 --- a/chain/chain/src/garbage_collection.rs +++ b/chain/chain/src/garbage_collection.rs @@ -1048,8 +1048,6 @@ impl<'a> ChainStoreUpdate<'a> { | DBCol::Misc | DBCol::_ReceiptIdToShardId => unreachable!(), - #[cfg(feature = "new_epoch_sync")] - DBCol::EpochSyncInfo => unreachable!(), } self.merge(store_update); } diff --git a/chain/chain/src/store/mod.rs b/chain/chain/src/store/mod.rs index f4695204709..8f0aadc59a6 100644 --- a/chain/chain/src/store/mod.rs +++ b/chain/chain/src/store/mod.rs @@ -10,8 +10,6 @@ use near_chain_primitives::error::Error; use near_epoch_manager::EpochManagerAdapter; use near_primitives::block::Tip; use near_primitives::checked_feature; -#[cfg(feature = "new_epoch_sync")] -use near_primitives::epoch_manager::epoch_sync::EpochSyncInfo; use near_primitives::errors::InvalidTxError; use near_primitives::hash::CryptoHash; use near_primitives::merkle::{MerklePath, PartialMerkleTree}; @@ -900,15 +898,6 @@ impl ChainStore { store_update.commit().map_err(|err| err.into()) } - /// Save epoch sync info - #[cfg(feature = "new_epoch_sync")] - pub fn get_epoch_sync_info(&self, epoch_id: &EpochId) -> Result { - option_to_not_found( - self.store.get_ser(DBCol::EpochSyncInfo, epoch_id.as_ref()), - "EpochSyncInfo", - ) - } - /// Retrieve the kinds of state changes occurred in a given block. 
/// /// We store different types of data, so we prefer to only expose minimal information about the diff --git a/chain/chain/src/test_utils/kv_runtime.rs b/chain/chain/src/test_utils/kv_runtime.rs index 117f4f8cc4b..2a83690ff9a 100644 --- a/chain/chain/src/test_utils/kv_runtime.rs +++ b/chain/chain/src/test_utils/kv_runtime.rs @@ -1043,18 +1043,6 @@ impl EpochManagerAdapter for MockEpochManager { Ok(vec) } - #[cfg(feature = "new_epoch_sync")] - fn get_all_epoch_hashes( - &self, - _last_block_info: &BlockInfo, - _hash_to_prev_hash: Option<&HashMap>, - ) -> Result, EpochError> { - Ok(vec![]) - } - - #[cfg(feature = "new_epoch_sync")] - fn force_update_aggregator(&self, _epoch_id: &EpochId, _hash: &CryptoHash) {} - fn get_epoch_all_validators( &self, _epoch_id: &EpochId, diff --git a/chain/client/Cargo.toml b/chain/client/Cargo.toml index 894f40e24d2..80ba1dfbd38 100644 --- a/chain/client/Cargo.toml +++ b/chain/client/Cargo.toml @@ -124,4 +124,3 @@ sandbox = [ "near-chain/sandbox", "near-o11y/sandbox", ] -new_epoch_sync = ["near-chain/new_epoch_sync"] diff --git a/chain/client/src/client.rs b/chain/client/src/client.rs index 941c62a7898..a388464f989 100644 --- a/chain/client/src/client.rs +++ b/chain/client/src/client.rs @@ -10,7 +10,6 @@ use crate::stateless_validation::chunk_validator::ChunkValidator; use crate::stateless_validation::partial_witness::partial_witness_actor::PartialWitnessSenderForClient; use crate::sync::adapter::SyncShardInfo; use crate::sync::block::BlockSync; -use crate::sync::epoch::EpochSync; use crate::sync::header::HeaderSync; use crate::sync::state::{StateSync, StateSyncResult}; use crate::SyncAdapter; @@ -97,12 +96,6 @@ use crate::client_actor::AdvProduceChunksMode; const NUM_REBROADCAST_BLOCKS: usize = 30; -/// The time we wait for the response to a Epoch Sync request before retrying -// TODO #3488 set 30_000 -pub const EPOCH_SYNC_REQUEST_TIMEOUT: Duration = Duration::milliseconds(1_000); -/// How frequently a Epoch Sync response can be sent to a particular peer -// TODO #3488 set 60_000 -pub const EPOCH_SYNC_PEER_TIMEOUT: Duration = Duration::milliseconds(10); /// Drop blocks whose height are beyond head + horizon if it is not in the current epoch. const BLOCK_HORIZON: u64 = 500; @@ -158,8 +151,6 @@ pub struct Client { /// storing the current status of the state sync and blocks catch up pub catchup_state_syncs: HashMap, BlocksCatchUpState)>, - /// Keeps track of information needed to perform the initial Epoch Sync - pub epoch_sync: EpochSync, /// Keeps track of syncing headers. pub header_sync: HeaderSync, /// Keeps track of syncing block. @@ -286,23 +277,6 @@ impl Client { let sharded_tx_pool = ShardedTransactionPool::new(rng_seed, config.transaction_pool_size_limit); let sync_status = SyncStatus::AwaitingPeers; - let genesis_block = chain.genesis_block(); - let epoch_sync = EpochSync::new( - clock.clone(), - network_adapter.clone(), - *genesis_block.header().epoch_id(), - *genesis_block.header().next_epoch_id(), - epoch_manager - .get_epoch_block_producers_ordered( - genesis_block.header().epoch_id(), - genesis_block.hash(), - )? 
- .iter() - .map(|x| x.0.clone()) - .collect(), - EPOCH_SYNC_REQUEST_TIMEOUT, - EPOCH_SYNC_PEER_TIMEOUT, - ); let header_sync = HeaderSync::new( clock.clone(), network_adapter.clone(), @@ -398,7 +372,6 @@ impl Client { NonZeroUsize::new(num_block_producer_seats).unwrap(), ), catchup_state_syncs: HashMap::new(), - epoch_sync, header_sync, block_sync, state_sync, diff --git a/chain/client/src/info.rs b/chain/client/src/info.rs index 33caca624d4..8988ae93b87 100644 --- a/chain/client/src/info.rs +++ b/chain/client/src/info.rs @@ -936,7 +936,7 @@ mod tests { #[test] fn test_telemetry_info() { - let config = ClientConfig::test(false, 1230, 2340, 50, false, true, true, true); + let config = ClientConfig::test(false, 1230, 2340, 50, false, true, true); let validator = MutableConfigValue::new(None, "validator_signer"); let info_helper = InfoHelper::new(Clock::real(), noop().into_sender(), &config); @@ -1043,7 +1043,7 @@ mod tests { ); // Then check that get_num_validators returns the correct number of validators. - let client_config = ClientConfig::test(false, 1230, 2340, 50, false, true, true, true); + let client_config = ClientConfig::test(false, 1230, 2340, 50, false, true, true); let mut info_helper = InfoHelper::new(Clock::real(), noop().into_sender(), &client_config); assert_eq!( num_validators, diff --git a/chain/client/src/sync/epoch.rs b/chain/client/src/sync/epoch.rs deleted file mode 100644 index 1af495d4d1e..00000000000 --- a/chain/client/src/sync/epoch.rs +++ /dev/null @@ -1,82 +0,0 @@ -use near_async::time::{Clock, Duration, Utc}; -use near_network::types::PeerManagerAdapter; -use near_primitives::hash::CryptoHash; -use near_primitives::network::PeerId; -use near_primitives::types::validator_stake::ValidatorStake; -use near_primitives::types::EpochId; -use std::collections::{HashMap, HashSet}; - -/// Helper to keep track of the Epoch Sync -// TODO #3488 -#[allow(dead_code)] -pub struct EpochSync { - clock: Clock, - network_adapter: PeerManagerAdapter, - /// Datastructure to keep track of when the last request to each peer was made. - /// Peers do not respond to Epoch Sync requests more frequently than once per a certain time - /// interval, thus there's no point in requesting more frequently. - peer_to_last_request_time: HashMap, - /// Tracks all the peers who have reported that we are already up to date - peers_reporting_up_to_date: HashSet, - /// The last epoch we are synced to - current_epoch_id: EpochId, - /// The next epoch id we need to sync - next_epoch_id: EpochId, - /// The block producers set to validate the light client block view for the next epoch - next_block_producers: Vec, - /// The last epoch id that we have requested - requested_epoch_id: EpochId, - /// When and to whom was the last request made - last_request_time: Utc, - last_request_peer_id: Option, - - /// How long to wait for a response before re-requesting the same light client block view - request_timeout: Duration, - /// How frequently to send request to the same peer - peer_timeout: Duration, - - /// True, if all peers agreed that we're at the last Epoch. - /// Only finalization is needed. - have_all_epochs: bool, - /// Whether the Epoch Sync was performed to completion previously. - /// Current state machine allows for only one Epoch Sync. 
- pub done: bool, - - pub sync_hash: CryptoHash, - - received_epoch: bool, - - is_just_started: bool, -} - -impl EpochSync { - pub fn new( - clock: Clock, - network_adapter: PeerManagerAdapter, - genesis_epoch_id: EpochId, - genesis_next_epoch_id: EpochId, - first_epoch_block_producers: Vec, - request_timeout: Duration, - peer_timeout: Duration, - ) -> Self { - Self { - clock: clock.clone(), - network_adapter, - peer_to_last_request_time: HashMap::new(), - peers_reporting_up_to_date: HashSet::new(), - current_epoch_id: genesis_epoch_id, - next_epoch_id: genesis_next_epoch_id, - next_block_producers: first_epoch_block_producers, - requested_epoch_id: genesis_epoch_id, - last_request_time: clock.now_utc(), - last_request_peer_id: None, - request_timeout, - peer_timeout, - received_epoch: false, - have_all_epochs: false, - done: false, - sync_hash: CryptoHash::default(), - is_just_started: true, - } - } -} diff --git a/chain/client/src/sync/mod.rs b/chain/client/src/sync/mod.rs index 462344f1926..a7933545d98 100644 --- a/chain/client/src/sync/mod.rs +++ b/chain/client/src/sync/mod.rs @@ -1,6 +1,5 @@ pub mod adapter; pub mod block; -pub mod epoch; pub mod external; pub mod header; pub mod state; diff --git a/chain/client/src/test_utils/setup.rs b/chain/client/src/test_utils/setup.rs index bf533a5982f..cde03a9ce30 100644 --- a/chain/client/src/test_utils/setup.rs +++ b/chain/client/src/test_utils/setup.rs @@ -85,7 +85,6 @@ pub fn setup( max_block_prod_time: u64, enable_doomslug: bool, archive: bool, - epoch_sync_enabled: bool, state_sync_enabled: bool, network_adapter: PeerManagerAdapter, transaction_validity_period: NumBlocks, @@ -128,7 +127,6 @@ pub fn setup( num_validator_seats, archive, true, - epoch_sync_enabled, state_sync_enabled, ); base.chunk_distribution_network = chunk_distribution_config; @@ -221,7 +219,6 @@ pub fn setup_only_view( max_block_prod_time: u64, enable_doomslug: bool, archive: bool, - epoch_sync_enabled: bool, state_sync_enabled: bool, network_adapter: PeerManagerAdapter, transaction_validity_period: NumBlocks, @@ -282,7 +279,6 @@ pub fn setup_only_view( num_validator_seats, archive, true, - epoch_sync_enabled, state_sync_enabled, ); @@ -316,7 +312,7 @@ pub fn setup_mock( ) -> PeerManagerMessageResponse, >, ) -> ActorHandlesForTesting { - setup_mock_with_validity_period_and_no_epoch_sync( + setup_mock_with_validity_period( clock, validators, account_id, @@ -327,7 +323,7 @@ pub fn setup_mock( ) } -pub fn setup_mock_with_validity_period_and_no_epoch_sync( +pub fn setup_mock_with_validity_period( clock: Clock, validators: Vec, account_id: AccountId, @@ -354,7 +350,6 @@ pub fn setup_mock_with_validity_period_and_no_epoch_sync( MAX_BLOCK_PROD_TIME.whole_milliseconds() as u64, enable_doomslug, false, - false, true, network_adapter.as_multi_sender(), transaction_validity_period, @@ -821,7 +816,6 @@ pub fn setup_mock_all_validators( epoch_length: BlockHeightDelta, enable_doomslug: bool, archive: Vec, - epoch_sync_enabled: Vec, check_block_stats: bool, chunk_distribution_config: Option, peer_manager_mock: Box< @@ -868,7 +862,6 @@ pub fn setup_mock_all_validators( let largest_skipped_height1 = largest_skipped_height.clone(); let hash_to_height1 = hash_to_height.clone(); let archive1 = archive.clone(); - let epoch_sync_enabled1 = epoch_sync_enabled.clone(); let chunk_distribution_config1 = chunk_distribution_config.clone(); let client_sender = LateBoundSender::new(); let client_sender1 = client_sender.clone(); @@ -914,7 +907,6 @@ pub fn setup_mock_all_validators( block_prod_time * 
3, enable_doomslug, archive1[index], - epoch_sync_enabled1[index], false, pm.into_multi_sender(), 10000, @@ -944,7 +936,7 @@ pub fn setup_no_network( skip_sync_wait: bool, enable_doomslug: bool, ) -> ActorHandlesForTesting { - setup_no_network_with_validity_period_and_no_epoch_sync( + setup_no_network_with_validity_period( clock, validators, account_id, @@ -954,7 +946,7 @@ pub fn setup_no_network( ) } -pub fn setup_no_network_with_validity_period_and_no_epoch_sync( +pub fn setup_no_network_with_validity_period( clock: Clock, validators: Vec, account_id: AccountId, @@ -962,7 +954,7 @@ pub fn setup_no_network_with_validity_period_and_no_epoch_sync( transaction_validity_period: NumBlocks, enable_doomslug: bool, ) -> ActorHandlesForTesting { - setup_mock_with_validity_period_and_no_epoch_sync( + setup_mock_with_validity_period( clock, validators, account_id, @@ -992,16 +984,8 @@ pub fn setup_client_with_runtime( partial_witness_adapter: PartialWitnessSenderForClient, validator_signer: Arc, ) -> Client { - let mut config = ClientConfig::test( - true, - 10, - 20, - num_validator_seats, - archive, - save_trie_changes, - true, - true, - ); + let mut config = + ClientConfig::test(true, 10, 20, num_validator_seats, archive, save_trie_changes, true); config.epoch_length = chain_genesis.epoch_length; let state_sync_adapter = Arc::new(RwLock::new(SyncAdapter::new( noop().into_sender(), diff --git a/chain/client/src/tests/bug_repros.rs b/chain/client/src/tests/bug_repros.rs index c9a59f87a2d..b239d69c965 100644 --- a/chain/client/src/tests/bug_repros.rs +++ b/chain/client/src/tests/bug_repros.rs @@ -65,7 +65,6 @@ fn repro_1183() { 5, false, vec![false; validators.len()], - vec![true; validators.len()], false, None, Box::new(move |_, _account_id: _, msg: &PeerManagerMessageRequest| { @@ -194,7 +193,6 @@ fn test_sync_from_archival_node() { epoch_length, false, vec![true, false, false, false], - vec![false, true, true, true], false, None, Box::new( @@ -299,7 +297,6 @@ fn test_long_gap_between_blocks() { epoch_length, true, vec![false, false], - vec![true, true], false, None, Box::new( diff --git a/chain/client/src/tests/catching_up.rs b/chain/client/src/tests/catching_up.rs index ff727cc523f..19a5ced219a 100644 --- a/chain/client/src/tests/catching_up.rs +++ b/chain/client/src/tests/catching_up.rs @@ -144,7 +144,6 @@ fn test_catchup_receipts_sync_common(wait_till: u64, send: u64, sync_hold: bool) let (vs, key_pairs) = get_validators_and_key_pairs(); let archive = vec![true; vs.all_block_producers().count()]; - let epoch_sync_enabled = vec![false; vs.all_block_producers().count()]; let phase = Arc::new(RwLock::new(ReceiptsSyncPhases::WaitingForFirstBlock)); let seen_heights_with_receipts = Arc::new(RwLock::new(HashSet::::new())); @@ -166,7 +165,6 @@ fn test_catchup_receipts_sync_common(wait_till: u64, send: u64, sync_hold: bool) 5, false, archive, - epoch_sync_enabled, false, None, Box::new(move |_, _account_id: _, msg: &PeerManagerMessageRequest| { @@ -461,7 +459,6 @@ fn test_catchup_random_single_part_sync_common(skip_15: bool, non_zero: bool, he 5, true, vec![false; validators.len()], - vec![true; validators.len()], false, None, Box::new(move |_, _account_id: _, msg: &PeerManagerMessageRequest| { @@ -638,7 +635,6 @@ fn test_catchup_sanity_blocks_produced() { let (vs, key_pairs) = get_validators_and_key_pairs(); let vs = vs.validator_groups(2); let archive = vec![false; vs.all_block_producers().count()]; - let epoch_sync_enabled = vec![true; vs.all_block_producers().count()]; let (conn, _) = 
setup_mock_all_validators( Clock::real(), @@ -651,7 +647,6 @@ fn test_catchup_sanity_blocks_produced() { 5, true, archive, - epoch_sync_enabled, false, None, Box::new(move |_, _account_id: _, msg: &PeerManagerMessageRequest| { @@ -713,7 +708,6 @@ fn test_all_chunks_accepted_common( let (vs, key_pairs) = get_validators_and_key_pairs(); let archive = vec![false; vs.all_block_producers().count()]; - let epoch_sync_enabled = vec![true; vs.all_block_producers().count()]; let verbose = false; @@ -732,7 +726,6 @@ fn test_all_chunks_accepted_common( epoch_length, true, archive, - epoch_sync_enabled, false, None, Box::new(move |_, sender_account_id: AccountId, msg: &PeerManagerMessageRequest| { diff --git a/chain/client/src/tests/consensus.rs b/chain/client/src/tests/consensus.rs index 57d2f40bec6..f4e3430045f 100644 --- a/chain/client/src/tests/consensus.rs +++ b/chain/client/src/tests/consensus.rs @@ -54,7 +54,6 @@ fn test_consensus_with_epoch_switches() { ValidatorSchedule::new().num_shards(8).block_producers_per_epoch(validators.clone()); let key_pairs = (0..24).map(|_| PeerInfo::random()).collect::>(); let archive = vec![true; vs.all_block_producers().count()]; - let epoch_sync_enabled = vec![false; vs.all_block_producers().count()]; let block_to_prev_block = Arc::new(RwLock::new(HashMap::new())); let block_to_height = Arc::new(RwLock::new(HashMap::new())); @@ -79,7 +78,6 @@ fn test_consensus_with_epoch_switches() { 4, true, archive, - epoch_sync_enabled, false, None, Box::new(move |_, from_whom: AccountId, msg: &PeerManagerMessageRequest| { diff --git a/chain/client/src/tests/cross_shard_tx.rs b/chain/client/src/tests/cross_shard_tx.rs index 809be505b1c..7467758d81f 100644 --- a/chain/client/src/tests/cross_shard_tx.rs +++ b/chain/client/src/tests/cross_shard_tx.rs @@ -55,7 +55,6 @@ fn test_keyvalue_runtime_balances() { 5, false, vec![false; validators.len()], - vec![true; validators.len()], false, None, Box::new(move |_, _account_id: _, _msg: &PeerManagerMessageRequest| { @@ -452,7 +451,6 @@ fn test_cross_shard_tx_common( 20, test_doomslug, vec![true; validators.len()], - vec![false; validators.len()], true, None, Box::new(move |_, _account_id: _, _msg: &PeerManagerMessageRequest| { diff --git a/chain/client/src/tests/query_client.rs b/chain/client/src/tests/query_client.rs index dc660ab0788..20c6960f92a 100644 --- a/chain/client/src/tests/query_client.rs +++ b/chain/client/src/tests/query_client.rs @@ -232,7 +232,6 @@ fn test_state_request() { 400, false, true, - false, true, MockPeerManagerAdapter::default().into_multi_sender(), 100, diff --git a/chain/epoch-manager/Cargo.toml b/chain/epoch-manager/Cargo.toml index 2ab973e59f7..90505a318d1 100644 --- a/chain/epoch-manager/Cargo.toml +++ b/chain/epoch-manager/Cargo.toml @@ -53,4 +53,3 @@ nightly_protocol = [ "near-store/nightly_protocol", ] no_cache = [] -new_epoch_sync = ["near-store/new_epoch_sync", "near-primitives/new_epoch_sync"] diff --git a/chain/epoch-manager/src/adapter.rs b/chain/epoch-manager/src/adapter.rs index 7b3eb3d281c..2a7f8a3147f 100644 --- a/chain/epoch-manager/src/adapter.rs +++ b/chain/epoch-manager/src/adapter.rs @@ -1,6 +1,4 @@ use crate::types::BlockHeaderInfo; -#[cfg(feature = "new_epoch_sync")] -use crate::EpochInfoAggregator; use crate::EpochManagerHandle; use near_chain_primitives::Error; use near_crypto::Signature; @@ -25,8 +23,6 @@ use near_primitives::version::ProtocolVersion; use near_primitives::views::EpochValidatorInfo; use near_store::{ShardUId, StoreUpdate}; use std::cmp::Ordering; -#[cfg(feature = 
"new_epoch_sync")] -use std::collections::HashMap; use std::sync::Arc; /// A trait that abstracts the interface of the EpochManager. The two @@ -462,19 +458,6 @@ pub trait EpochManagerAdapter: Send + Sync { tip: &Tip, height: BlockHeight, ) -> Result, EpochError>; - - /// Returns a vector of all hashes in the epoch ending with `last_block_info`. - /// Only return blocks on chain of `last_block_info`. - /// Hashes are returned in the order from the last block to the first block. - #[cfg(feature = "new_epoch_sync")] - fn get_all_epoch_hashes( - &self, - last_block_info: &BlockInfo, - hash_to_prev_hash: Option<&HashMap>, - ) -> Result, EpochError>; - - #[cfg(feature = "new_epoch_sync")] - fn force_update_aggregator(&self, epoch_id: &EpochId, hash: &CryptoHash); } impl EpochManagerAdapter for EpochManagerHandle { @@ -1124,27 +1107,6 @@ impl EpochManagerAdapter for EpochManagerHandle { epoch_manager.possible_epochs_of_height_around_tip(tip, height) } - #[cfg(feature = "new_epoch_sync")] - fn get_all_epoch_hashes( - &self, - last_block_info: &BlockInfo, - hash_to_prev_hash: Option<&HashMap>, - ) -> Result, EpochError> { - let epoch_manager = self.read(); - match hash_to_prev_hash { - None => epoch_manager.get_all_epoch_hashes_from_db(last_block_info), - Some(hash_to_prev_hash) => { - epoch_manager.get_all_epoch_hashes_from_cache(last_block_info, hash_to_prev_hash) - } - } - } - - #[cfg(feature = "new_epoch_sync")] - fn force_update_aggregator(&self, epoch_id: &EpochId, hash: &CryptoHash) { - let mut epoch_manager = self.write(); - epoch_manager.epoch_info_aggregator = EpochInfoAggregator::new(*epoch_id, *hash); - } - /// Returns the set of chunk validators for a given epoch fn get_epoch_all_validators( &self, diff --git a/chain/epoch-manager/src/lib.rs b/chain/epoch-manager/src/lib.rs index cb1f8e6851c..0a5816923c8 100644 --- a/chain/epoch-manager/src/lib.rs +++ b/chain/epoch-manager/src/lib.rs @@ -2092,62 +2092,4 @@ impl EpochManager { // The height doesn't belong to any of the epochs around the tip, return an empty Vec. Ok(vec![]) } - - #[cfg(feature = "new_epoch_sync")] - pub fn get_all_epoch_hashes_from_db( - &self, - last_block_info: &BlockInfo, - ) -> Result, EpochError> { - let _span = - tracing::debug_span!(target: "epoch_manager", "get_all_epoch_hashes_from_db", ?last_block_info) - .entered(); - - let mut result = vec![]; - let first_epoch_block_height = - self.get_block_info(last_block_info.epoch_first_block())?.height(); - let mut current_block_info = last_block_info.clone(); - while current_block_info.hash() != last_block_info.epoch_first_block() { - // Check that we didn't reach previous epoch. - // This only should happen if BlockInfo data is incorrect. - // Without this assert same BlockInfo will cause infinite loop instead of crash with a message. - assert!( - current_block_info.height() > first_epoch_block_height, - "Reached {:?} from {:?} when first epoch height is {:?}", - current_block_info, - last_block_info, - first_epoch_block_height - ); - - result.push(*current_block_info.hash()); - current_block_info = (*self.get_block_info(current_block_info.prev_hash())?).clone(); - } - // First block of an epoch is not covered by the while loop. 
- result.push(*current_block_info.hash()); - - Ok(result) - } - - #[cfg(feature = "new_epoch_sync")] - fn get_all_epoch_hashes_from_cache( - &self, - last_block_info: &BlockInfo, - hash_to_prev_hash: &HashMap, - ) -> Result, EpochError> { - let _span = - tracing::debug_span!(target: "epoch_manager", "get_all_epoch_hashes_from_cache", ?last_block_info) - .entered(); - - let mut result = vec![]; - let mut current_hash = *last_block_info.hash(); - while current_hash != *last_block_info.epoch_first_block() { - result.push(current_hash); - current_hash = *hash_to_prev_hash - .get(¤t_hash) - .ok_or(EpochError::MissingBlock(current_hash))?; - } - // First block of an epoch is not covered by the while loop. - result.push(current_hash); - - Ok(result) - } } diff --git a/chain/jsonrpc/jsonrpc-tests/src/lib.rs b/chain/jsonrpc/jsonrpc-tests/src/lib.rs index 8fd13108f39..52d2de60e07 100644 --- a/chain/jsonrpc/jsonrpc-tests/src/lib.rs +++ b/chain/jsonrpc/jsonrpc-tests/src/lib.rs @@ -7,7 +7,7 @@ use near_async::{ messaging::{noop, IntoMultiSender}, }; use near_chain_configs::GenesisConfig; -use near_client::test_utils::setup_no_network_with_validity_period_and_no_epoch_sync; +use near_client::test_utils::setup_no_network_with_validity_period; use near_client::ViewClientActor; use near_jsonrpc::{start_http, RpcConfig}; use near_jsonrpc_primitives::{ @@ -30,16 +30,16 @@ pub enum NodeType { } pub fn start_all(clock: Clock, node_type: NodeType) -> (Addr, tcp::ListenerAddr) { - start_all_with_validity_period_and_no_epoch_sync(clock, node_type, 100, false) + start_all_with_validity_period(clock, node_type, 100, false) } -pub fn start_all_with_validity_period_and_no_epoch_sync( +pub fn start_all_with_validity_period( clock: Clock, node_type: NodeType, transaction_validity_period: NumBlocks, enable_doomslug: bool, ) -> (Addr, tcp::ListenerAddr) { - let actor_handles = setup_no_network_with_validity_period_and_no_epoch_sync( + let actor_handles = setup_no_network_with_validity_period( clock, vec!["test1".parse().unwrap()], if let NodeType::Validator = node_type { diff --git a/chain/jsonrpc/jsonrpc-tests/tests/rpc_transactions.rs b/chain/jsonrpc/jsonrpc-tests/tests/rpc_transactions.rs index 69b10403d84..99ab9896a37 100644 --- a/chain/jsonrpc/jsonrpc-tests/tests/rpc_transactions.rs +++ b/chain/jsonrpc/jsonrpc-tests/tests/rpc_transactions.rs @@ -120,7 +120,7 @@ fn test_send_tx_commit() { fn test_expired_tx() { init_integration_logger(); run_actix(async { - let (_, addr) = test_utils::start_all_with_validity_period_and_no_epoch_sync( + let (_, addr) = test_utils::start_all_with_validity_period( Clock::real(), test_utils::NodeType::Validator, 1, diff --git a/core/chain-configs/src/client_config.rs b/core/chain-configs/src/client_config.rs index d581d918649..5d680cdb038 100644 --- a/core/chain-configs/src/client_config.rs +++ b/core/chain-configs/src/client_config.rs @@ -265,10 +265,6 @@ pub fn default_sync_height_threshold() -> u64 { 1 } -pub fn default_epoch_sync_enabled() -> bool { - false -} - pub fn default_state_sync() -> Option { Some(StateSyncConfig { dump: None, @@ -433,8 +429,6 @@ pub struct ClientConfig { pub save_trie_changes: bool, /// Number of threads for ViewClientActor pool. pub view_client_threads: usize, - /// Run Epoch Sync on the start. - pub epoch_sync_enabled: bool, /// Number of seconds between state requests for view client. pub view_client_throttle_period: Duration, /// Upper bound of the byte size of contract state that is still viewable. 
None is no limit @@ -501,7 +495,6 @@ impl ClientConfig { num_block_producer_seats: NumSeats, archive: bool, save_trie_changes: bool, - epoch_sync_enabled: bool, state_sync_enabled: bool, ) -> Self { assert!( @@ -555,7 +548,6 @@ impl ClientConfig { save_trie_changes, log_summary_style: LogSummaryStyle::Colored, view_client_threads: 1, - epoch_sync_enabled, view_client_throttle_period: Duration::seconds(1), trie_viewer_state_size_limit: None, max_gas_burnt_view: None, diff --git a/core/chain-configs/src/lib.rs b/core/chain-configs/src/lib.rs index 3ceec9b0361..78baf9058d6 100644 --- a/core/chain-configs/src/lib.rs +++ b/core/chain-configs/src/lib.rs @@ -10,20 +10,19 @@ pub mod test_utils; mod updateable_config; pub use client_config::{ - default_enable_multiline_logging, default_epoch_sync_enabled, - default_header_sync_expected_height_per_second, default_header_sync_initial_timeout, - default_header_sync_progress_timeout, default_header_sync_stall_ban_timeout, - default_log_summary_period, default_orphan_state_witness_max_size, - default_orphan_state_witness_pool_size, default_produce_chunk_add_transactions_time_limit, - default_state_sync, default_state_sync_enabled, default_state_sync_timeout, - default_sync_check_period, default_sync_height_threshold, default_sync_max_block_requests, - default_sync_step_period, default_transaction_pool_size_limit, - default_trie_viewer_state_size_limit, default_tx_routing_height_horizon, - default_view_client_threads, default_view_client_throttle_period, - ChunkDistributionNetworkConfig, ChunkDistributionUris, ClientConfig, DumpConfig, - ExternalStorageConfig, ExternalStorageLocation, GCConfig, LogSummaryStyle, ReshardingConfig, - ReshardingHandle, StateSyncConfig, SyncConfig, DEFAULT_GC_NUM_EPOCHS_TO_KEEP, - DEFAULT_STATE_SYNC_NUM_CONCURRENT_REQUESTS_EXTERNAL, + default_enable_multiline_logging, default_header_sync_expected_height_per_second, + default_header_sync_initial_timeout, default_header_sync_progress_timeout, + default_header_sync_stall_ban_timeout, default_log_summary_period, + default_orphan_state_witness_max_size, default_orphan_state_witness_pool_size, + default_produce_chunk_add_transactions_time_limit, default_state_sync, + default_state_sync_enabled, default_state_sync_timeout, default_sync_check_period, + default_sync_height_threshold, default_sync_max_block_requests, default_sync_step_period, + default_transaction_pool_size_limit, default_trie_viewer_state_size_limit, + default_tx_routing_height_horizon, default_view_client_threads, + default_view_client_throttle_period, ChunkDistributionNetworkConfig, ChunkDistributionUris, + ClientConfig, DumpConfig, ExternalStorageConfig, ExternalStorageLocation, GCConfig, + LogSummaryStyle, ReshardingConfig, ReshardingHandle, StateSyncConfig, SyncConfig, + DEFAULT_GC_NUM_EPOCHS_TO_KEEP, DEFAULT_STATE_SYNC_NUM_CONCURRENT_REQUESTS_EXTERNAL, DEFAULT_STATE_SYNC_NUM_CONCURRENT_REQUESTS_ON_CATCHUP_EXTERNAL, MIN_GC_NUM_EPOCHS_TO_KEEP, TEST_STATE_SYNC_TIMEOUT, }; diff --git a/core/primitives/Cargo.toml b/core/primitives/Cargo.toml index 761acf05d85..2b1ccfea6c2 100644 --- a/core/primitives/Cargo.toml +++ b/core/primitives/Cargo.toml @@ -88,7 +88,6 @@ nightly_protocol = [ "near-primitives/nightly_protocol", ] -new_epoch_sync = [] default = ["near-primitives/rand"] calimero_zero_storage = [] diff --git a/core/primitives/src/epoch_manager.rs b/core/primitives/src/epoch_manager.rs index 290aeba1ee4..7007f182f7b 100644 --- a/core/primitives/src/epoch_manager.rs +++ b/core/primitives/src/epoch_manager.rs @@ 
-340,168 +340,6 @@ pub struct ValidatorSelectionConfig { pub shuffle_shard_assignment_for_chunk_producers: bool, } -#[cfg(feature = "new_epoch_sync")] -pub mod epoch_sync { - use crate::block_header::BlockHeader; - use crate::epoch_block_info::BlockInfo; - use crate::epoch_info::EpochInfo; - use crate::errors::epoch_sync::{EpochSyncHashType, EpochSyncInfoError}; - use crate::types::EpochId; - use borsh::{BorshDeserialize, BorshSerialize}; - use near_primitives_core::hash::CryptoHash; - use std::collections::{HashMap, HashSet}; - - /// Struct to keep all the info that is transferred for one epoch during Epoch Sync. - #[derive(BorshSerialize, BorshDeserialize, PartialEq, Debug)] - pub struct EpochSyncInfo { - /// All block hashes of this epoch. In order of production. - pub all_block_hashes: Vec, - /// All headers relevant to epoch sync. - /// Contains epoch headers that need to be saved + supporting headers needed for validation. - /// Probably contains one header from the previous epoch. - /// It refers to `last_final_block` of the first block of the epoch. - /// Also contains first header from the next epoch. - /// It refers to `next_epoch_first_hash`. - pub headers: HashMap, - /// Hashes of headers that need to be validated and saved. - pub headers_to_save: HashSet, - /// Hash of the first block of the next epoch. - /// Header of this block contains `epoch_sync_data_hash`. - pub next_epoch_first_hash: CryptoHash, - pub epoch_info: EpochInfo, - pub next_epoch_info: EpochInfo, - pub next_next_epoch_info: EpochInfo, - } - - impl EpochSyncInfo { - pub fn get_epoch_id(&self) -> Result<&EpochId, EpochSyncInfoError> { - Ok(self.get_epoch_first_header()?.epoch_id()) - } - - pub fn get_next_epoch_id(&self) -> Result<&EpochId, EpochSyncInfoError> { - Ok(self - .get_header(self.next_epoch_first_hash, EpochSyncHashType::NextEpochFirstBlock)? - .epoch_id()) - } - - pub fn get_next_next_epoch_id(&self) -> Result { - Ok(EpochId(*self.get_epoch_last_hash()?)) - } - - pub fn get_epoch_last_hash(&self) -> Result<&CryptoHash, EpochSyncInfoError> { - let epoch_height = self.epoch_info.epoch_height(); - - self.all_block_hashes.last().ok_or(EpochSyncInfoError::ShortEpoch { epoch_height }) - } - - pub fn get_epoch_last_header(&self) -> Result<&BlockHeader, EpochSyncInfoError> { - self.get_header(*self.get_epoch_last_hash()?, EpochSyncHashType::LastEpochBlock) - } - - pub fn get_epoch_last_finalised_hash(&self) -> Result<&CryptoHash, EpochSyncInfoError> { - Ok(self.get_epoch_last_header()?.last_final_block()) - } - - pub fn get_epoch_last_finalised_header(&self) -> Result<&BlockHeader, EpochSyncInfoError> { - self.get_header( - *self.get_epoch_last_finalised_hash()?, - EpochSyncHashType::LastFinalBlock, - ) - } - - pub fn get_epoch_first_hash(&self) -> Result<&CryptoHash, EpochSyncInfoError> { - let epoch_height = self.epoch_info.epoch_height(); - - self.all_block_hashes.first().ok_or(EpochSyncInfoError::ShortEpoch { epoch_height }) - } - - pub fn get_epoch_first_header(&self) -> Result<&BlockHeader, EpochSyncInfoError> { - self.get_header(*self.get_epoch_first_hash()?, EpochSyncHashType::FirstEpochBlock) - } - - /// Reconstruct BlockInfo for `hash` from information in EpochSyncInfo. 
- pub fn get_block_info(&self, hash: &CryptoHash) -> Result { - let epoch_first_header = self.get_epoch_first_header()?; - let header = self.get_header(*hash, EpochSyncHashType::Other)?; - - if epoch_first_header.epoch_id() != header.epoch_id() { - let msg = "We can only correctly reconstruct headers from this epoch"; - debug_assert!(false, "{}", msg); - tracing::error!(message = msg); - } - - let last_finalized_height = if *header.last_final_block() == CryptoHash::default() { - 0 - } else { - let last_finalized_header = - self.get_header(*header.last_final_block(), EpochSyncHashType::LastFinalBlock)?; - last_finalized_header.height() - }; - let mut block_info = BlockInfo::new( - *header.hash(), - header.height(), - last_finalized_height, - *header.last_final_block(), - *header.prev_hash(), - header.prev_validator_proposals().collect(), - header.chunk_mask().to_vec(), - vec![], - header.total_supply(), - header.latest_protocol_version(), - header.raw_timestamp(), - ); - - *block_info.epoch_id_mut() = *epoch_first_header.epoch_id(); - *block_info.epoch_first_block_mut() = *epoch_first_header.hash(); - Ok(block_info) - } - - /// Reconstruct legacy `epoch_sync_data_hash` from `EpochSyncInfo`. - /// `epoch_sync_data_hash` was introduced in `BlockHeaderInnerRestV3`. - /// Using this hash we can verify that `EpochInfo` data provided in `EpochSyncInfo` is correct. - pub fn calculate_epoch_sync_data_hash(&self) -> Result { - let epoch_height = self.epoch_info.epoch_height(); - - if self.all_block_hashes.len() < 2 { - return Err(EpochSyncInfoError::ShortEpoch { epoch_height }); - } - let epoch_first_block = self.all_block_hashes[0]; - let epoch_prev_last_block = self.all_block_hashes[self.all_block_hashes.len() - 2]; - let epoch_last_block = self.all_block_hashes[self.all_block_hashes.len() - 1]; - - Ok(CryptoHash::hash_borsh(&( - self.get_block_info(&epoch_first_block)?, - self.get_block_info(&epoch_prev_last_block)?, - self.get_block_info(&epoch_last_block)?, - &self.epoch_info, - &self.next_epoch_info, - &self.next_next_epoch_info, - ))) - } - - /// Read legacy `epoch_sync_data_hash` from next epoch first header. - /// `epoch_sync_data_hash` was introduced in `BlockHeaderInnerRestV3`. - /// Using this hash we can verify that `EpochInfo` data provided in `EpochSyncInfo` is correct. 
- pub fn get_epoch_sync_data_hash(&self) -> Result, EpochSyncInfoError> { - let next_epoch_first_header = - self.get_header(self.next_epoch_first_hash, EpochSyncHashType::Other)?; - Ok(next_epoch_first_header.epoch_sync_data_hash()) - } - - pub fn get_header( - &self, - hash: CryptoHash, - hash_type: EpochSyncHashType, - ) -> Result<&BlockHeader, EpochSyncInfoError> { - self.headers.get(&hash).ok_or(EpochSyncInfoError::HashNotFound { - hash, - hash_type, - epoch_height: self.epoch_info.epoch_height(), - }) - } - } -} - #[derive(BorshSerialize, BorshDeserialize, ProtocolSchema)] pub struct EpochSummary { pub prev_epoch_last_block_hash: CryptoHash, diff --git a/core/primitives/src/epoch_sync.rs b/core/primitives/src/epoch_sync.rs deleted file mode 100644 index 49f92e0d6bf..00000000000 --- a/core/primitives/src/epoch_sync.rs +++ /dev/null @@ -1,34 +0,0 @@ -use crate::block_header::BlockHeader; -use crate::epoch_block_info::BlockInfo; -use crate::epoch_info::EpochInfo; -use crate::merkle::PartialMerkleTree; -use crate::views::LightClientBlockView; -use borsh::{BorshDeserialize, BorshSerialize}; - -#[derive(BorshSerialize, BorshDeserialize, Eq, PartialEq, Debug, Clone)] -pub struct EpochSyncFinalizationResponse { - pub cur_epoch_header: BlockHeader, - pub prev_epoch_headers: Vec, - pub header_sync_init_header: BlockHeader, - pub header_sync_init_header_tree: PartialMerkleTree, - // This Block Info is required by Epoch Manager when it checks if it's a good time to start a new Epoch. - // Epoch Manager asks for height difference by obtaining first Block Info of the Epoch. - pub prev_epoch_first_block_info: BlockInfo, - // This Block Info is required in State Sync that is started right after Epoch Sync is finished. - // It is used by `verify_chunk_signature_with_header_parts` in `save_block` as it calls `get_epoch_id_from_prev_block`. - pub prev_epoch_prev_last_block_info: BlockInfo, - // This Block Info is connected with the first actual Block received in State Sync. - // It is also used in Epoch Manager. - pub prev_epoch_last_block_info: BlockInfo, - pub prev_epoch_info: EpochInfo, - pub cur_epoch_info: EpochInfo, - // Next Epoch Info is required by Block Sync when Blocks of current Epoch will come. - // It asks in `process_block_single`, returns `Epoch Out Of Bounds` error otherwise. 
- pub next_epoch_info: EpochInfo, -} - -#[derive(BorshSerialize, BorshDeserialize, Eq, PartialEq, Debug, Clone)] -pub enum EpochSyncResponse { - UpToDate, - Advance { light_client_block_view: Box }, -} diff --git a/core/primitives/src/errors.rs b/core/primitives/src/errors.rs index 9800536d099..23d51cbc225 100644 --- a/core/primitives/src/errors.rs +++ b/core/primitives/src/errors.rs @@ -1270,28 +1270,3 @@ pub enum FunctionCallError { _EVMError, ExecutionError(String), } - -#[cfg(feature = "new_epoch_sync")] -pub mod epoch_sync { - use near_primitives_core::hash::CryptoHash; - use near_primitives_core::types::EpochHeight; - use std::fmt::Debug; - - #[derive(Eq, PartialEq, Clone, strum::Display, Debug)] - pub enum EpochSyncHashType { - LastEpochBlock, - LastFinalBlock, - FirstEpochBlock, - NextEpochFirstBlock, - Other, - BlockToSave, - } - - #[derive(Eq, PartialEq, Clone, thiserror::Error, Debug)] - pub enum EpochSyncInfoError { - #[error("{hash_type} hash {hash:?} not a part of EpochSyncInfo for epoch {epoch_height}")] - HashNotFound { hash: CryptoHash, hash_type: EpochSyncHashType, epoch_height: EpochHeight }, - #[error("all_block_hashes.len() < 2 for epoch {epoch_height}")] - ShortEpoch { epoch_height: EpochHeight }, - } -} diff --git a/core/primitives/src/lib.rs b/core/primitives/src/lib.rs index 04d695a4b2d..235a695a1d0 100644 --- a/core/primitives/src/lib.rs +++ b/core/primitives/src/lib.rs @@ -17,7 +17,6 @@ pub mod congestion_info; pub mod epoch_block_info; pub mod epoch_info; pub mod epoch_manager; -pub mod epoch_sync; pub mod errors; pub mod merkle; pub mod network; diff --git a/core/store/Cargo.toml b/core/store/Cargo.toml index b52fae6deb5..c9a57cc5341 100644 --- a/core/store/Cargo.toml +++ b/core/store/Cargo.toml @@ -78,7 +78,6 @@ io_trace = [] no_cache = [] single_thread_rocksdb = [] # Deactivate RocksDB IO background threads test_features = ["near-vm-runner/test_features"] -new_epoch_sync = [] # TODO(#11639): extract metrics into separate feature nightly_protocol = [ diff --git a/core/store/src/columns.rs b/core/store/src/columns.rs index 696d3786134..1ac720a5e2d 100644 --- a/core/store/src/columns.rs +++ b/core/store/src/columns.rs @@ -293,12 +293,6 @@ pub enum DBCol { /// Witnesses with the lowest index are garbage collected first. /// u64 -> LatestWitnessesKey LatestWitnessesByIndex, - /// Column to store data for Epoch Sync. - /// Does not contain data for genesis epoch. - /// - *Rows*: `epoch_id` - /// - *Column type*: `EpochSyncInfo - #[cfg(feature = "new_epoch_sync")] - EpochSyncInfo, } /// Defines different logical parts of a db key. 
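For context on the verification performed by the deleted helpers above: `calculate_epoch_sync_data_hash` borsh-hashed a fixed 6-tuple (the epoch's first, prev-last and last `BlockInfo` plus the current, next and next-next `EpochInfo`) and compared the result with the `epoch_sync_data_hash` recorded in the next epoch's first header (`get_epoch_sync_data_hash`). The sketch below illustrates only that idea and is not nearcore code: it uses stand-in structs instead of `BlockInfo`/`EpochInfo` and `sha2` in place of `CryptoHash::hash_borsh`, and assumes `borsh = { version = "1", features = ["derive"] }` and `sha2 = "0.10"` as dependencies.

use borsh::{to_vec, BorshSerialize};
use sha2::{Digest, Sha256};

// Stand-ins for BlockInfo / EpochInfo; only the "borsh-hash a fixed tuple" shape matters here.
#[derive(BorshSerialize)]
struct FakeBlockInfo { height: u64, prev_hash: [u8; 32] }

#[derive(BorshSerialize)]
struct FakeEpochInfo { epoch_height: u64 }

// Hash the same 6-tuple shape the removed calculate_epoch_sync_data_hash used:
// (epoch first block, epoch prev-last block, epoch last block,
//  epoch info, next epoch info, next-next epoch info).
fn epoch_sync_data_hash(
    first: &FakeBlockInfo,
    prev_last: &FakeBlockInfo,
    last: &FakeBlockInfo,
    epoch: &FakeEpochInfo,
    next: &FakeEpochInfo,
    next_next: &FakeEpochInfo,
) -> [u8; 32] {
    let bytes = to_vec(&(first, prev_last, last, epoch, next, next_next))
        .expect("borsh serialization of plain in-memory values does not fail");
    Sha256::digest(&bytes).into()
}

fn main() {
    let block = |height| FakeBlockInfo { height, prev_hash: [0; 32] };
    let epoch = |epoch_height| FakeEpochInfo { epoch_height };
    // The reconstructed data verifies only if every field matches exactly:
    // any difference changes the borsh bytes and therefore the hash.
    let good = epoch_sync_data_hash(&block(1), &block(9), &block(10), &epoch(1), &epoch(2), &epoch(3));
    let bad = epoch_sync_data_hash(&block(1), &block(9), &block(10), &epoch(1), &epoch(2), &epoch(4));
    assert_ne!(good, bad);
    println!("hashes differ as expected");
}

In the removed code the three `BlockInfo` values were themselves rebuilt from headers by `get_block_info`, so an exact hash match also validated that reconstruction.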
@@ -501,8 +495,6 @@ impl DBCol { | DBCol::FlatStateChanges | DBCol::FlatStateDeltaMetadata | DBCol::FlatStorageStatus => false, - #[cfg(feature = "new_epoch_sync")] - DBCol::EpochSyncInfo => false } } @@ -574,8 +566,6 @@ impl DBCol { DBCol::StateTransitionData => &[DBKeyType::BlockHash, DBKeyType::ShardId], DBCol::LatestChunkStateWitnesses => &[DBKeyType::LatestWitnessesKey], DBCol::LatestWitnessesByIndex => &[DBKeyType::LatestWitnessIndex], - #[cfg(feature = "new_epoch_sync")] - DBCol::EpochSyncInfo => &[DBKeyType::EpochId], } } } diff --git a/integration-tests/Cargo.toml b/integration-tests/Cargo.toml index 41a98d1ec2a..ba01bfd15c6 100644 --- a/integration-tests/Cargo.toml +++ b/integration-tests/Cargo.toml @@ -167,5 +167,4 @@ sandbox = [ ] no_cache = ["nearcore/no_cache"] calimero_zero_storage = [] -new_epoch_sync = ["nearcore/new_epoch_sync"] testloop = ["near-chain/testloop"] diff --git a/integration-tests/src/test_loop/builder.rs b/integration-tests/src/test_loop/builder.rs index 8d1d11cdc53..73744b2902c 100644 --- a/integration-tests/src/test_loop/builder.rs +++ b/integration-tests/src/test_loop/builder.rs @@ -216,8 +216,7 @@ impl TestLoopBuilder { let sync_jobs_adapter = LateBoundSender::new(); let genesis = self.genesis.clone().unwrap(); - let mut client_config = - ClientConfig::test(true, 600, 2000, 4, is_archival, true, false, false); + let mut client_config = ClientConfig::test(true, 600, 2000, 4, is_archival, true, false); client_config.max_block_wait_delay = Duration::seconds(6); client_config.state_sync_enabled = true; client_config.state_sync_timeout = Duration::milliseconds(100); diff --git a/integration-tests/src/test_loop/tests/simple_test_loop_example.rs b/integration-tests/src/test_loop/tests/simple_test_loop_example.rs index a7a00c56b8f..3ccbe2a906e 100644 --- a/integration-tests/src/test_loop/tests/simple_test_loop_example.rs +++ b/integration-tests/src/test_loop/tests/simple_test_loop_example.rs @@ -38,7 +38,6 @@ fn test_client_with_simple_test_loop() { false, true, false, - false, ); let initial_balance = 10000 * ONE_NEAR; let accounts = diff --git a/integration-tests/src/tests/client/chunks_management.rs b/integration-tests/src/tests/client/chunks_management.rs index 0f8c1457714..ac75c928ab5 100644 --- a/integration-tests/src/tests/client/chunks_management.rs +++ b/integration-tests/src/tests/client/chunks_management.rs @@ -90,7 +90,6 @@ impl Test { ]); } let archive = vec![false; vs.all_validators().count()]; - let epoch_sync_enabled = vec![true; vs.all_validators().count()]; let key_pairs = (0..vs.all_validators().count()).map(|_| PeerInfo::random()).collect::>(); @@ -105,7 +104,6 @@ impl Test { 5, true, archive, - epoch_sync_enabled, false, chunk_distribution_config, Box::new(move |_, from_whom: AccountId, msg: &PeerManagerMessageRequest| { diff --git a/integration-tests/src/tests/client/epoch_sync.rs b/integration-tests/src/tests/client/epoch_sync.rs deleted file mode 100644 index 2397b50340b..00000000000 --- a/integration-tests/src/tests/client/epoch_sync.rs +++ /dev/null @@ -1,491 +0,0 @@ -use crate::tests::nearcore_utils::{add_blocks, setup_configs_with_epoch_length}; -use crate::tests::test_helpers::heavy_test; -use actix::Actor; -use actix_rt::System; -use futures::{future, FutureExt}; -use near_actix_test_utils::run_actix; -use near_async::time::Clock; -use near_chain::Provenance; -use near_chain::{BlockProcessingArtifact, ChainStoreAccess}; -use near_chain_configs::Genesis; -use near_client::test_utils::TestEnv; -use near_client::ProcessTxResponse; -use 
near_client_primitives::types::GetBlock; -use near_crypto::{InMemorySigner, KeyType}; -use near_network::test_utils::WaitOrTimeoutActor; -use near_o11y::testonly::{init_integration_logger, init_test_logger}; -use near_o11y::WithSpanContextExt; -use near_primitives::epoch_block_info::BlockInfo; -use near_primitives::epoch_manager::epoch_sync::EpochSyncInfo; -use near_primitives::state_part::PartId; -use near_primitives::state_sync::get_num_state_parts; -use near_primitives::test_utils::create_test_signer; -use near_primitives::transaction::{ - Action, DeployContractAction, FunctionCallAction, SignedTransaction, -}; -use near_primitives::types::EpochId; -use near_primitives::utils::MaybeValidated; -use near_primitives_core::hash::CryptoHash; -use near_primitives_core::types::BlockHeight; -use near_store::Mode::ReadOnly; -use near_store::{DBCol, NodeStorage}; -use nearcore::test_utils::TestEnvNightshadeSetupExt; -use nearcore::{start_with_config, NearConfig}; -use std::collections::HashSet; -use std::path::Path; -use std::sync::{Arc, RwLock}; - -fn generate_transactions(last_hash: &CryptoHash, h: BlockHeight) -> Vec { - let mut txs = vec![]; - let signer = - InMemorySigner::from_seed("test0".parse().unwrap(), KeyType::ED25519, "test0").into(); - if h == 1 { - txs.push(SignedTransaction::from_actions( - h, - "test0".parse().unwrap(), - "test0".parse().unwrap(), - &signer, - vec![Action::DeployContract(DeployContractAction { - code: near_test_contracts::rs_contract().to_vec(), - })], - *last_hash, - 0, - )); - } - - for i in 0..5 { - txs.push(SignedTransaction::from_actions( - h * 10 + i, - "test0".parse().unwrap(), - "test0".parse().unwrap(), - &signer, - vec![Action::FunctionCall(Box::new(FunctionCallAction { - method_name: "write_random_value".to_string(), - args: vec![], - gas: 100_000_000_000_000, - deposit: 0, - }))], - *last_hash, - 0, - )); - } - - for i in 0..5 { - txs.push(SignedTransaction::send_money( - h * 10 + i, - "test0".parse().unwrap(), - "test1".parse().unwrap(), - &signer, - 1, - *last_hash, - )); - } - txs -} - -/// Produce 4 epochs with some transactions. -/// When the first block of the next epoch is finalised check that `EpochSyncInfo` has been recorded. 
-#[test] -fn test_continuous_epoch_sync_info_population() { - init_test_logger(); - - let epoch_length = 5; - let max_height = epoch_length * 4 + 3; - - let mut genesis = Genesis::test(vec!["test0".parse().unwrap(), "test1".parse().unwrap()], 1); - genesis.config.epoch_length = epoch_length; - let mut env = TestEnv::builder(&genesis.config).nightshade_runtimes(&genesis).build(); - - let mut last_hash = *env.clients[0].chain.genesis().hash(); - let mut last_epoch_id = EpochId::default(); - - for h in 1..max_height { - for tx in generate_transactions(&last_hash, h) { - assert_eq!(env.clients[0].process_tx(tx, false, false), ProcessTxResponse::ValidTx); - } - - let block = env.clients[0].produce_block(h).unwrap().unwrap(); - env.process_block(0, block.clone(), Provenance::PRODUCED); - last_hash = *block.hash(); - - let last_final_hash = block.header().last_final_block(); - if *last_final_hash == CryptoHash::default() { - continue; - } - let last_final_header = - env.clients[0].chain.chain_store().get_block_header(last_final_hash).unwrap(); - - if *last_final_header.epoch_id() != last_epoch_id { - let epoch_id = last_epoch_id; - - tracing::debug!("Checking epoch: {:?}", &epoch_id); - assert!(env.clients[0].chain.chain_store().get_epoch_sync_info(&epoch_id).is_ok()); - tracing::debug!("OK"); - } - - last_epoch_id = *last_final_header.epoch_id(); - } -} - -/// Produce 4 epochs + 10 blocks. -/// Start second node without epoch sync, but with state sync. -/// Sync second node to first node (at least headers). -/// Check that it has all EpochSyncInfo records and all of them are correct. -#[test] -fn test_continuous_epoch_sync_info_population_on_header_sync() { - heavy_test(|| { - init_integration_logger(); - - let (genesis, genesis_block, mut near1_base, mut near2_base) = - setup_configs_with_epoch_length(50); - - let dir1_base = - tempfile::Builder::new().prefix("epoch_sync_info_in_header_sync_1").tempdir().unwrap(); - let dir2_base = - tempfile::Builder::new().prefix("epoch_sync_info_in_header_sync_2").tempdir().unwrap(); - let epoch_ids_base = Arc::new(RwLock::new(HashSet::new())); - - let near1 = near1_base.clone(); - let near2 = near2_base.clone(); - let dir1_path = dir1_base.path(); - let dir2_path = dir2_base.path(); - let epoch_ids = epoch_ids_base.clone(); - - run_actix(async move { - // Start first node - let nearcore::NearNode { client: client1, .. } = - start_with_config(dir1_path, near1).expect("start_with_config"); - - // Generate 4 epochs + 10 blocks - let signer = create_test_signer("other"); - let blocks = add_blocks( - Clock::real(), - vec![genesis_block], - client1, - 210, - genesis.config.epoch_length, - &signer, - ); - - // Save all finished epoch_ids - let mut epoch_ids = epoch_ids.write().unwrap(); - for block in blocks[0..200].iter() { - epoch_ids.insert(*block.header().epoch_id()); - } - - // Start second node - let nearcore::NearNode { view_client: view_client2, .. } = - start_with_config(dir2_path, near2).expect("start_with_config"); - - // Wait for second node's headers to sync. - // Timeout here means that header sync is not working. - // Header sync is better debugged through other tests. 
- WaitOrTimeoutActor::new( - Box::new(move |_ctx| { - let actor = view_client2.send(GetBlock::latest().with_span_context()); - let actor = actor.then(|res| { - match &res { - Ok(Ok(b)) if b.header.height == 210 => System::current().stop(), - Err(_) => return future::ready(()), - _ => {} - }; - future::ready(()) - }); - actix::spawn(actor); - }), - 100, - 120000, - ) - .start(); - }); - - // Open storages of both nodes - let open_read_only_storage = |home_dir: &Path, near_config: &NearConfig| -> NodeStorage { - let opener = NodeStorage::opener(home_dir, false, &near_config.config.store, None); - opener.open_in_mode(ReadOnly).unwrap() - }; - - let store1 = open_read_only_storage(dir1_base.path(), &mut near1_base).get_hot_store(); - let store2 = open_read_only_storage(dir2_base.path(), &mut near2_base).get_hot_store(); - - // Check that for every epoch second store has EpochSyncInfo. - // And that values in both stores are the same. - let epoch_ids = epoch_ids_base.read().unwrap(); - for epoch_id in epoch_ids.iter() { - // Check that we have a value for EpochSyncInfo in the synced node - assert!( - store2 - .get_ser::(DBCol::EpochSyncInfo, epoch_id.as_ref()) - .unwrap() - .is_some(), - "{:?}", - epoch_id - ); - // Check that it matches value in full node exactly - assert_eq!( - store1 - .get_ser::(DBCol::EpochSyncInfo, epoch_id.as_ref()) - .unwrap() - .unwrap(), - store2 - .get_ser::(DBCol::EpochSyncInfo, epoch_id.as_ref()) - .unwrap() - .unwrap(), - "{:?}", - epoch_id - ); - } - }); -} - -/// Check that we can reconstruct `BlockInfo` and `epoch_sync_data_hash` from `EpochSyncInfo`. -#[test] -fn test_epoch_sync_data_hash_from_epoch_sync_info() { - init_test_logger(); - - let epoch_length = 5; - let max_height = epoch_length * 4 + 3; - - let mut genesis = Genesis::test(vec!["test0".parse().unwrap(), "test1".parse().unwrap()], 1); - genesis.config.epoch_length = epoch_length; - let mut env = TestEnv::builder(&genesis.config).nightshade_runtimes(&genesis).build(); - - let mut last_hash = *env.clients[0].chain.genesis().hash(); - let mut last_epoch_id = EpochId::default(); - - for h in 1..max_height { - for tx in generate_transactions(&last_hash, h) { - assert_eq!(env.clients[0].process_tx(tx, false, false), ProcessTxResponse::ValidTx); - } - - let block = env.clients[0].produce_block(h).unwrap().unwrap(); - env.process_block(0, block.clone(), Provenance::PRODUCED); - last_hash = *block.hash(); - - let last_final_hash = block.header().last_final_block(); - if *last_final_hash == CryptoHash::default() { - continue; - } - let last_final_header = - env.clients[0].chain.chain_store().get_block_header(last_final_hash).unwrap(); - - if *last_final_header.epoch_id() != last_epoch_id { - let epoch_id = last_epoch_id; - - let epoch_sync_info = - env.clients[0].chain.chain_store().get_epoch_sync_info(&epoch_id).unwrap(); - - tracing::debug!("Checking epoch sync info: {:?}", &epoch_sync_info); - - // Check that all BlockInfos needed for new epoch sync can be reconstructed. - // This also helps with debugging if `epoch_sync_data_hash` doesn't match. 
- for hash in &epoch_sync_info.headers_to_save { - let block_info = env.clients[0] - .chain - .chain_store() - .store() - .get_ser::(DBCol::BlockInfo, hash.as_ref()) - .unwrap() - .unwrap(); - let reconstructed_block_info = epoch_sync_info.get_block_info(hash).unwrap(); - assert_eq!(block_info, reconstructed_block_info); - } - - assert_eq!( - epoch_sync_info.calculate_epoch_sync_data_hash().unwrap(), - epoch_sync_info.get_epoch_sync_data_hash().unwrap().unwrap(), - ); - - tracing::debug!("OK"); - } - - last_epoch_id = *last_final_header.epoch_id(); - } -} - -/// This is an unreliable test that mocks/reimplements sync logic. -/// After epoch sync is integrated into sync process we can write a better test. -/// -/// The test simulates two clients, one of which is -/// - stopped after one epoch -/// - synced through epoch sync, header sync, state sync, and block sync -/// - in sync with other client for two more epochs -#[test] -#[ignore] -fn test_node_after_simulated_sync() { - init_test_logger(); - let num_clients = 2; - let epoch_length = 20; - let num_epochs = 5; - // Max height for clients[0] before sync. - let max_height_0 = epoch_length * num_epochs - 1; - // Max height for clients[1] before sync. - let max_height_1 = epoch_length; - - // TestEnv setup - let mut genesis = Genesis::test(vec!["test0".parse().unwrap(), "test1".parse().unwrap()], 1); - genesis.config.epoch_length = epoch_length; - - let mut env = TestEnv::builder(&genesis.config) - .clients_count(num_clients) - .real_stores() - .use_state_snapshots() - .nightshade_runtimes(&genesis) - .build(); - - // Produce blocks - let mut last_hash = *env.clients[0].chain.genesis().hash(); - let mut blocks = vec![]; - - for h in 1..max_height_0 { - for tx in generate_transactions(&last_hash, h) { - assert_eq!(env.clients[0].process_tx(tx, false, false), ProcessTxResponse::ValidTx); - } - - let block = env.clients[0].produce_block(h).unwrap().unwrap(); - env.process_block(0, block.clone(), Provenance::PRODUCED); - last_hash = *block.hash(); - blocks.push(block.clone()); - - if h < max_height_1 { - env.process_block(1, block.clone(), Provenance::NONE); - } - } - - // Do "epoch sync" up to last epoch - - // Current epoch for clients[0]. - let epoch_id0 = env.clients[0].chain.header_head().unwrap().epoch_id; - // Next epoch for clients[1]. - let mut epoch_id1 = env.clients[1].chain.header_head().unwrap().epoch_id; - - // We rely on the fact that epoch_id0 is not finished for clients[0]. - // So we need to "sync" all epochs in [epoch_id1, epoch_id0). - while epoch_id1 != epoch_id0 { - tracing::debug!("Syncing epoch {:?}", epoch_id1); - - let epoch_sync_data = - env.clients[0].chain.chain_store().get_epoch_sync_info(&epoch_id1).unwrap(); - env.clients[1].chain.validate_and_record_epoch_sync_info(&epoch_sync_data).unwrap(); - - epoch_id1 = env.clients[1] - .epoch_manager - .get_next_epoch_id(&env.clients[1].chain.header_head().unwrap().last_block_hash) - .unwrap(); - } - - // Do "header sync" for the current epoch for clients[0]. 
- tracing::debug!("Client 0 Header Head: {:?}", env.clients[0].chain.header_head()); - tracing::debug!("Client 1 Header Head Before: {:?}", env.clients[1].chain.header_head()); - - let mut last_epoch_headers = vec![]; - for block in &blocks { - if *block.header().epoch_id() == epoch_id0 { - last_epoch_headers.push(block.header().clone()); - } - } - env.clients[1].chain.sync_block_headers(last_epoch_headers, &mut vec![]).unwrap(); - - tracing::debug!("Client 0 Header Head: {:?}", env.clients[0].chain.header_head()); - tracing::debug!("Client 1 Header Head After: {:?}", env.clients[1].chain.header_head()); - - // Do "state sync" for the last epoch - // write last block of prev epoch - { - let mut store_update = env.clients[1].chain.chain_store().store().store_update(); - - let mut last_block = &blocks[0]; - for block in &blocks { - if *block.header().epoch_id() == epoch_id0 { - break; - } - last_block = block; - } - - tracing::debug!("Write block {:?}", last_block.header()); - - store_update.insert_ser(DBCol::Block, last_block.hash().as_ref(), last_block).unwrap(); - store_update.commit().unwrap(); - } - - let sync_hash = *env.clients[0] - .epoch_manager - .get_block_info(&env.clients[0].chain.header_head().unwrap().last_block_hash) - .unwrap() - .epoch_first_block(); - tracing::debug!("SYNC HASH: {:?}", sync_hash); - for shard_id in env.clients[0].epoch_manager.shard_ids(&epoch_id0).unwrap() { - tracing::debug!("Start syncing shard {:?}", shard_id); - let sync_block_header = env.clients[0].chain.get_block_header(&sync_hash).unwrap(); - let sync_prev_header = - env.clients[0].chain.get_previous_header(&sync_block_header).unwrap(); - let sync_prev_prev_hash = sync_prev_header.prev_hash(); - - let state_header = - env.clients[0].chain.compute_state_response_header(shard_id, sync_hash).unwrap(); - let state_root = state_header.chunk_prev_state_root(); - let num_parts = get_num_state_parts(state_header.state_root_node().memory_usage); - - for part_id in 0..num_parts { - tracing::debug!("Syncing part {:?} of {:?}", part_id, num_parts); - let state_part = env.clients[0] - .chain - .runtime_adapter - .obtain_state_part( - shard_id, - sync_prev_prev_hash, - &state_root, - PartId::new(part_id, num_parts), - ) - .unwrap(); - - env.clients[1] - .runtime_adapter - .apply_state_part( - shard_id, - &state_root, - PartId::new(part_id, num_parts), - state_part.as_ref(), - &epoch_id0, - ) - .unwrap(); - } - } - - env.clients[1] - .chain - .reset_heads_post_state_sync( - &None, - sync_hash, - &mut BlockProcessingArtifact::default(), - None, - ) - .unwrap(); - - tracing::debug!("Client 0 Head: {:?}", env.clients[0].chain.head()); - tracing::debug!("Client 1 Head: {:?}", env.clients[1].chain.head()); - - // Do "block sync" for the last epoch - - for block in &blocks { - if *block.header().epoch_id() == epoch_id0 { - tracing::debug!("Receive block {:?}", block.header()); - env.clients[1] - .process_block_test(MaybeValidated::from(block.clone()), Provenance::NONE) - .unwrap(); - } - } - - // Produce blocks on clients[0] and process them on clients[1] - for h in max_height_0..(max_height_0 + 2 * epoch_length) { - tracing::debug!("Produce and process block {}", h); - for tx in generate_transactions(&last_hash, h) { - assert_eq!(env.clients[0].process_tx(tx, false, false), ProcessTxResponse::ValidTx); - } - - let block = env.clients[0].produce_block(h).unwrap().unwrap(); - env.process_block(0, block.clone(), Provenance::PRODUCED); - env.process_block(1, block.clone(), Provenance::NONE); - last_hash = *block.hash(); 
- } -} diff --git a/integration-tests/src/tests/client/mod.rs b/integration-tests/src/tests/client/mod.rs index fc3f454cb9f..dcb8e04b80e 100644 --- a/integration-tests/src/tests/client/mod.rs +++ b/integration-tests/src/tests/client/mod.rs @@ -3,8 +3,6 @@ mod block_corruption; mod challenges; mod chunks_management; mod cold_storage; -#[cfg(feature = "new_epoch_sync")] -mod epoch_sync; mod features; mod flat_storage; mod process_blocks; diff --git a/integration-tests/src/tests/client/process_blocks.rs b/integration-tests/src/tests/client/process_blocks.rs index de41e1843db..e6e8ebc27eb 100644 --- a/integration-tests/src/tests/client/process_blocks.rs +++ b/integration-tests/src/tests/client/process_blocks.rs @@ -485,7 +485,6 @@ fn produce_block_with_approvals_arrived_early() { "test4".parse().unwrap(), ]]); let archive = vec![false; vs.all_block_producers().count()]; - let epoch_sync_enabled = vec![true; vs.all_block_producers().count()]; let key_pairs = vec![PeerInfo::random(), PeerInfo::random(), PeerInfo::random(), PeerInfo::random()]; let block_holder: Arc>> = Arc::new(RwLock::new(None)); @@ -502,7 +501,6 @@ fn produce_block_with_approvals_arrived_early() { 100, true, archive, - epoch_sync_enabled, false, None, Box::new( @@ -785,7 +783,6 @@ fn ban_peer_for_invalid_block_common(mode: InvalidBlockMode) { 100, true, vec![false; validators.len()], - vec![true; validators.len()], false, None, Box::new( diff --git a/integration-tests/src/tests/client/sync_state_nodes.rs b/integration-tests/src/tests/client/sync_state_nodes.rs index ee7cc48ada8..280a91fda35 100644 --- a/integration-tests/src/tests/client/sync_state_nodes.rs +++ b/integration-tests/src/tests/client/sync_state_nodes.rs @@ -45,7 +45,6 @@ fn sync_state_nodes() { let mut near1 = load_test_config("test1", port1, genesis.clone()); near1.network_config.peer_store.boot_nodes = convert_boot_nodes(vec![]); near1.client_config.min_num_peers = 0; - near1.client_config.epoch_sync_enabled = false; run_actix(async move { let dir1 = tempfile::Builder::new().prefix("sync_nodes_1").tempdir().unwrap(); let nearcore::NearNode { view_client: view_client1, .. 
} = @@ -77,7 +76,6 @@ fn sync_state_nodes() { near2.client_config.min_num_peers = 1; near2.network_config.peer_store.boot_nodes = convert_boot_nodes(vec![("test1", *port1)]); - near2.client_config.epoch_sync_enabled = false; let dir2 = tempfile::Builder::new() .prefix("sync_nodes_2") @@ -163,7 +161,6 @@ fn sync_state_nodes_multishard() { near1.client_config.min_num_peers = 2; near1.client_config.min_block_production_delay = Duration::milliseconds(200); near1.client_config.max_block_production_delay = Duration::milliseconds(400); - near1.client_config.epoch_sync_enabled = false; let mut near3 = load_test_config("test3", port3, genesis.clone()); near3.network_config.peer_store.boot_nodes = @@ -173,7 +170,6 @@ fn sync_state_nodes_multishard() { near1.client_config.min_block_production_delay; near3.client_config.max_block_production_delay = near1.client_config.max_block_production_delay; - near3.client_config.epoch_sync_enabled = false; let mut near4 = load_test_config("test4", port4, genesis.clone()); near4.network_config.peer_store.boot_nodes = @@ -183,7 +179,6 @@ fn sync_state_nodes_multishard() { near1.client_config.min_block_production_delay; near4.client_config.max_block_production_delay = near1.client_config.max_block_production_delay; - near4.client_config.epoch_sync_enabled = false; let dir1 = tempfile::Builder::new().prefix("sync_nodes_1").tempdir().unwrap(); let nearcore::NearNode { view_client: view_client1, .. } = @@ -228,7 +223,6 @@ fn sync_state_nodes_multishard() { ("test3", *port3), ("test4", *port4), ]); - near2.client_config.epoch_sync_enabled = false; let dir2 = tempfile::Builder::new() .prefix("sync_nodes_2") @@ -316,7 +310,6 @@ fn sync_empty_state() { near1.client_config.min_num_peers = 0; near1.client_config.min_block_production_delay = Duration::milliseconds(200); near1.client_config.max_block_production_delay = Duration::milliseconds(400); - near1.client_config.epoch_sync_enabled = false; let dir1 = tempfile::Builder::new().prefix("sync_nodes_1").tempdir().unwrap(); let nearcore::NearNode { view_client: view_client1, .. } = @@ -357,7 +350,6 @@ fn sync_empty_state() { near2.client_config.block_fetch_horizon = block_fetch_horizon; near2.client_config.tracked_shards = vec![0, 1, 2, 3]; - near2.client_config.epoch_sync_enabled = false; let nearcore::NearNode { view_client: view_client2, @@ -446,7 +438,6 @@ fn sync_state_dump() { // An epoch passes in about 9 seconds. near1.client_config.min_block_production_delay = Duration::milliseconds(300); near1.client_config.max_block_production_delay = Duration::milliseconds(600); - near1.client_config.epoch_sync_enabled = false; near1.client_config.tracked_shards = vec![0]; // Track all shards. let dump_dir = tempfile::Builder::new().prefix("state_dump_1").tempdir().unwrap(); near1.client_config.state_sync.dump = Some(DumpConfig { @@ -494,7 +485,6 @@ fn sync_state_dump() { block_header_fetch_horizon; near2.client_config.block_fetch_horizon = block_fetch_horizon; near2.client_config.tracked_shards = vec![0]; // Track all shards. - near2.client_config.epoch_sync_enabled = false; near2.client_config.state_sync_enabled = true; near2.client_config.state_sync_timeout = Duration::seconds(2); near2.client_config.state_sync.sync = @@ -754,7 +744,6 @@ fn test_state_sync_headers() { let mut near1 = load_test_config("test1", tcp::ListenerAddr::reserve_for_test(), genesis.clone()); near1.client_config.min_num_peers = 0; - near1.client_config.epoch_sync_enabled = false; near1.client_config.tracked_shards = vec![0]; // Track all shards. 
let dir1 = tempfile::Builder::new().prefix("test_state_sync_headers").tempdir().unwrap(); @@ -948,7 +937,6 @@ fn test_state_sync_headers_no_tracked_shards() { let port1 = tcp::ListenerAddr::reserve_for_test(); let mut near1 = load_test_config("test1", port1, genesis.clone()); near1.client_config.min_num_peers = 0; - near1.client_config.epoch_sync_enabled = false; near1.client_config.tracked_shards = vec![0]; // Track all shards, it is a validator. let dir1 = tempfile::Builder::new() .prefix("test_state_sync_headers_no_tracked_shards_1") @@ -965,7 +953,6 @@ fn test_state_sync_headers_no_tracked_shards() { near2.network_config.peer_store.boot_nodes = convert_boot_nodes(vec![("test1", *port1)]); near2.client_config.min_num_peers = 0; - near2.client_config.epoch_sync_enabled = false; near2.client_config.tracked_shards = vec![]; // Track no shards. let dir2 = tempfile::Builder::new() .prefix("test_state_sync_headers_no_tracked_shards_2") diff --git a/integration-tests/src/tests/mod.rs b/integration-tests/src/tests/mod.rs index 1b44acbaac5..da6bb58aafd 100644 --- a/integration-tests/src/tests/mod.rs +++ b/integration-tests/src/tests/mod.rs @@ -2,8 +2,6 @@ mod client; mod dependencies; mod genesis_helpers; mod nearcore; -#[cfg(feature = "new_epoch_sync")] -mod nearcore_utils; mod network; mod runtime; mod standard_cases; diff --git a/integration-tests/src/tests/nearcore/node_cluster.rs b/integration-tests/src/tests/nearcore/node_cluster.rs index 05b7802564f..cf79dc62257 100644 --- a/integration-tests/src/tests/nearcore/node_cluster.rs +++ b/integration-tests/src/tests/nearcore/node_cluster.rs @@ -53,7 +53,6 @@ fn start_nodes( if i >= num_validator_seats && i < num_tracking_nodes { near_config.client_config.tracked_shards = vec![0]; } - near_config.client_config.epoch_sync_enabled = false; near_configs.push(near_config); } diff --git a/integration-tests/src/tests/nearcore/stake_nodes.rs b/integration-tests/src/tests/nearcore/stake_nodes.rs index 9b8eb41d4f1..43a3eab5bb5 100644 --- a/integration-tests/src/tests/nearcore/stake_nodes.rs +++ b/integration-tests/src/tests/nearcore/stake_nodes.rs @@ -81,7 +81,6 @@ fn init_test_staking( convert_boot_nodes(vec![("near.0", *first_node)]); } config.client_config.min_num_peers = num_node_seats as usize - 1; - config.client_config.epoch_sync_enabled = false; config }); configs diff --git a/integration-tests/src/tests/nearcore/sync_nodes.rs b/integration-tests/src/tests/nearcore/sync_nodes.rs index bb14a5ce226..0650f6529ad 100644 --- a/integration-tests/src/tests/nearcore/sync_nodes.rs +++ b/integration-tests/src/tests/nearcore/sync_nodes.rs @@ -36,13 +36,11 @@ fn sync_state_stake_change() { near1.network_config.peer_store.boot_nodes = convert_boot_nodes(vec![("test2", *port2)]); near1.client_config.min_num_peers = 0; near1.client_config.min_block_production_delay = Duration::milliseconds(200); - near1.client_config.epoch_sync_enabled = false; let mut near2 = load_test_config("test2", port2, genesis.clone()); near2.network_config.peer_store.boot_nodes = convert_boot_nodes(vec![("test1", *port1)]); near2.client_config.min_block_production_delay = Duration::milliseconds(200); near2.client_config.min_num_peers = 1; near2.client_config.skip_sync_wait = false; - near2.client_config.epoch_sync_enabled = false; let dir1 = tempfile::Builder::new().prefix("sync_state_stake_change_1").tempdir().unwrap(); let dir2 = tempfile::Builder::new().prefix("sync_state_stake_change_2").tempdir().unwrap(); diff --git a/integration-tests/src/tests/nearcore_utils.rs 
b/integration-tests/src/tests/nearcore_utils.rs index de74074e933..80e2350a85f 100644 --- a/integration-tests/src/tests/nearcore_utils.rs +++ b/integration-tests/src/tests/nearcore_utils.rs @@ -118,13 +118,11 @@ pub fn setup_configs_with_epoch_length( let mut near1 = load_test_config("test1", port1, genesis.clone()); near1.network_config.peer_store.boot_nodes = convert_boot_nodes(vec![("test2", *port2)]); near1.client_config.min_num_peers = 1; - near1.client_config.epoch_sync_enabled = false; near1.client_config.state_sync_enabled = true; let mut near2 = load_test_config("test2", port2, genesis.clone()); near2.network_config.peer_store.boot_nodes = convert_boot_nodes(vec![("test1", *port1)]); near2.client_config.min_num_peers = 1; - near2.client_config.epoch_sync_enabled = false; near2.client_config.state_sync_enabled = true; (genesis, genesis_block, near1, near2) diff --git a/integration-tests/src/tests/network/runner.rs b/integration-tests/src/tests/network/runner.rs index 9a3979b2e38..1c18fbd1604 100644 --- a/integration-tests/src/tests/network/runner.rs +++ b/integration-tests/src/tests/network/runner.rs @@ -73,8 +73,7 @@ fn setup_network_node( ActixWrapper::new(TelemetryActor::new(TelemetryConfig::default())).start(); let db = node_storage.into_inner(near_store::Temperature::Hot); - let mut client_config = - ClientConfig::test(false, 100, 200, num_validators, false, true, true, true); + let mut client_config = ClientConfig::test(false, 100, 200, num_validators, false, true, true); client_config.archive = config.archive; client_config.ttl_account_id_router = config.ttl_account_id_router.try_into().unwrap(); let state_roots = near_store::get_genesis_state_roots(runtime.store()) diff --git a/nearcore/Cargo.toml b/nearcore/Cargo.toml index 64977e9231b..f62b5d2c8ab 100644 --- a/nearcore/Cargo.toml +++ b/nearcore/Cargo.toml @@ -121,9 +121,6 @@ protocol_feature_fix_contract_loading_cost = [ protocol_feature_nonrefundable_transfer_nep491 = [ "near-primitives/protocol_feature_nonrefundable_transfer_nep491", ] -new_epoch_sync = [ - "near-client/new_epoch_sync" -] nightly = [ "near-actix-test-utils/nightly", diff --git a/nearcore/src/config.rs b/nearcore/src/config.rs index efad1a49a86..8a36ffff5d6 100644 --- a/nearcore/src/config.rs +++ b/nearcore/src/config.rs @@ -9,24 +9,24 @@ use near_chain_configs::test_utils::{ TESTING_INIT_BALANCE, TESTING_INIT_STAKE, }; use near_chain_configs::{ - default_enable_multiline_logging, default_epoch_sync_enabled, - default_header_sync_expected_height_per_second, default_header_sync_initial_timeout, - default_header_sync_progress_timeout, default_header_sync_stall_ban_timeout, - default_log_summary_period, default_orphan_state_witness_max_size, - default_orphan_state_witness_pool_size, default_produce_chunk_add_transactions_time_limit, - default_state_sync, default_state_sync_enabled, default_state_sync_timeout, - default_sync_check_period, default_sync_height_threshold, default_sync_max_block_requests, - default_sync_step_period, default_transaction_pool_size_limit, - default_trie_viewer_state_size_limit, default_tx_routing_height_horizon, - default_view_client_threads, default_view_client_throttle_period, get_initial_supply, - ChunkDistributionNetworkConfig, ClientConfig, GCConfig, Genesis, GenesisConfig, - GenesisValidationMode, LogSummaryStyle, MutableConfigValue, MutableValidatorSigner, - ReshardingConfig, StateSyncConfig, BLOCK_PRODUCER_KICKOUT_THRESHOLD, - CHUNK_PRODUCER_KICKOUT_THRESHOLD, CHUNK_VALIDATOR_ONLY_KICKOUT_THRESHOLD, - EXPECTED_EPOCH_LENGTH, 
FISHERMEN_THRESHOLD, GAS_PRICE_ADJUSTMENT_RATE, GENESIS_CONFIG_FILENAME, - INITIAL_GAS_LIMIT, MAX_INFLATION_RATE, MIN_BLOCK_PRODUCTION_DELAY, MIN_GAS_PRICE, NEAR_BASE, - NUM_BLOCKS_PER_YEAR, NUM_BLOCK_PRODUCER_SEATS, PROTOCOL_REWARD_RATE, - PROTOCOL_UPGRADE_STAKE_THRESHOLD, TRANSACTION_VALIDITY_PERIOD, + default_enable_multiline_logging, default_header_sync_expected_height_per_second, + default_header_sync_initial_timeout, default_header_sync_progress_timeout, + default_header_sync_stall_ban_timeout, default_log_summary_period, + default_orphan_state_witness_max_size, default_orphan_state_witness_pool_size, + default_produce_chunk_add_transactions_time_limit, default_state_sync, + default_state_sync_enabled, default_state_sync_timeout, default_sync_check_period, + default_sync_height_threshold, default_sync_max_block_requests, default_sync_step_period, + default_transaction_pool_size_limit, default_trie_viewer_state_size_limit, + default_tx_routing_height_horizon, default_view_client_threads, + default_view_client_throttle_period, get_initial_supply, ChunkDistributionNetworkConfig, + ClientConfig, GCConfig, Genesis, GenesisConfig, GenesisValidationMode, LogSummaryStyle, + MutableConfigValue, MutableValidatorSigner, ReshardingConfig, StateSyncConfig, + BLOCK_PRODUCER_KICKOUT_THRESHOLD, CHUNK_PRODUCER_KICKOUT_THRESHOLD, + CHUNK_VALIDATOR_ONLY_KICKOUT_THRESHOLD, EXPECTED_EPOCH_LENGTH, FISHERMEN_THRESHOLD, + GAS_PRICE_ADJUSTMENT_RATE, GENESIS_CONFIG_FILENAME, INITIAL_GAS_LIMIT, MAX_INFLATION_RATE, + MIN_BLOCK_PRODUCTION_DELAY, MIN_GAS_PRICE, NEAR_BASE, NUM_BLOCKS_PER_YEAR, + NUM_BLOCK_PRODUCER_SEATS, PROTOCOL_REWARD_RATE, PROTOCOL_UPGRADE_STAKE_THRESHOLD, + TRANSACTION_VALIDITY_PERIOD, }; use near_config_utils::{ValidationError, ValidationErrors}; use near_crypto::{InMemorySigner, KeyFile, KeyType, PublicKey}; @@ -249,7 +249,6 @@ pub struct Config { #[serde(flatten)] pub gc: GCConfig, pub view_client_threads: usize, - pub epoch_sync_enabled: bool, #[serde(with = "near_async::time::serde_duration_as_std")] pub view_client_throttle_period: Duration, pub trie_viewer_state_size_limit: Option, @@ -350,7 +349,6 @@ impl Default for Config { log_summary_style: LogSummaryStyle::Colored, log_summary_period: default_log_summary_period(), gc: GCConfig::default(), - epoch_sync_enabled: default_epoch_sync_enabled(), view_client_threads: default_view_client_threads(), view_client_throttle_period: default_view_client_throttle_period(), trie_viewer_state_size_limit: default_trie_viewer_state_size_limit(), @@ -571,7 +569,6 @@ impl NearConfig { log_summary_style: config.log_summary_style, gc: config.gc, view_client_threads: config.view_client_threads, - epoch_sync_enabled: config.epoch_sync_enabled, view_client_throttle_period: config.view_client_throttle_period, trie_viewer_state_size_limit: config.trie_viewer_state_size_limit, max_gas_burnt_view: config.max_gas_burnt_view, diff --git a/neard/Cargo.toml b/neard/Cargo.toml index eec862afca9..9f33f06400a 100644 --- a/neard/Cargo.toml +++ b/neard/Cargo.toml @@ -41,7 +41,6 @@ near-config-utils.workspace = true near-crypto.workspace = true near-database-tool.workspace = true near-dyn-configs.workspace = true -near-epoch-sync-tool = { workspace = true, optional = true } near-flat-storage.workspace = true near-fork-network.workspace = true near-jsonrpc-primitives.workspace = true @@ -74,7 +73,6 @@ rosetta_rpc = ["nearcore/rosetta_rpc"] json_rpc = ["nearcore/json_rpc"] protocol_feature_fix_staking_threshold = ["nearcore/protocol_feature_fix_staking_threshold"] 
protocol_feature_nonrefundable_transfer_nep491 = ["near-state-viewer/protocol_feature_nonrefundable_transfer_nep491"] -new_epoch_sync = ["nearcore/new_epoch_sync", "near-epoch-sync-tool/new_epoch_sync"] nightly = [ "near-chain-configs/nightly", diff --git a/neard/src/cli.rs b/neard/src/cli.rs index 7da640eb423..6fdeaf2028b 100644 --- a/neard/src/cli.rs +++ b/neard/src/cli.rs @@ -6,8 +6,6 @@ use near_client::ConfigUpdater; use near_cold_store_tool::ColdStoreCommand; use near_database_tool::commands::DatabaseCommand; use near_dyn_configs::{UpdateableConfigLoader, UpdateableConfigLoaderError, UpdateableConfigs}; -#[cfg(feature = "new_epoch_sync")] -use near_epoch_sync_tool::EpochSyncCommand; use near_flat_storage::commands::FlatStorageCommand; use near_fork_network::cli::ForkNetworkCommand; use near_jsonrpc_primitives::types::light_client::RpcLightClientExecutionProofResponse; @@ -144,10 +142,6 @@ impl NeardCmd { NeardSubCommand::StatePartsDumpCheck(cmd) => { cmd.run()?; } - #[cfg(feature = "new_epoch_sync")] - NeardSubCommand::EpochSync(cmd) => { - cmd.run(&home_dir)?; - } NeardSubCommand::ReplayArchive(cmd) => { cmd.run(&home_dir)?; } @@ -257,10 +251,6 @@ pub(super) enum NeardSubCommand { /// Replays the blocks in the chain from an archival node. ReplayArchive(ReplayArchiveCommand), - - #[cfg(feature = "new_epoch_sync")] - /// Testing tool for epoch sync - EpochSync(EpochSyncCommand), } #[derive(clap::Parser)] diff --git a/tools/epoch-sync/Cargo.toml b/tools/epoch-sync/Cargo.toml deleted file mode 100644 index 476c4ae1910..00000000000 --- a/tools/epoch-sync/Cargo.toml +++ /dev/null @@ -1,44 +0,0 @@ -[package] -name = "near-epoch-sync-tool" -version.workspace = true -authors.workspace = true -edition.workspace = true -rust-version.workspace = true -repository.workspace = true -license.workspace = true -publish = false - -[lints] -workspace = true - -# The dependencies are marked optional because we only need them when the -# new_epoch_sync feature is enabled. 
-[dependencies] -anyhow = { workspace = true, optional = true } -clap = { workspace = true, optional = true } -tracing = { workspace = true, optional = true } - -nearcore = { workspace = true, optional = true } -near-chain = { workspace = true, optional = true } -near-chain-configs = { workspace = true, optional = true } -near-epoch-manager = { workspace = true, optional = true } -near-primitives = { workspace = true, optional = true } -near-store = { workspace = true, optional = true } - -[features] - -default = [] -new_epoch_sync = [ - "nearcore/new_epoch_sync", - - "dep:anyhow", - "dep:clap", - "dep:tracing", - - "dep:nearcore", - "dep:near-chain", - "dep:near-chain-configs", - "dep:near-epoch-manager", - "dep:near-primitives", - "dep:near-store", -] diff --git a/tools/epoch-sync/src/cli.rs b/tools/epoch-sync/src/cli.rs deleted file mode 100644 index f408cd384b1..00000000000 --- a/tools/epoch-sync/src/cli.rs +++ /dev/null @@ -1,301 +0,0 @@ -use anyhow::Context; -use clap; -use near_chain::{ChainStore, ChainStoreAccess, ChainUpdate, DoomslugThresholdMode}; -use near_epoch_manager::EpochManager; -use near_primitives::block::BlockHeader; -use near_primitives::borsh::BorshDeserialize; -use near_primitives::epoch_block_info::BlockInfo; -use near_primitives::epoch_manager::AGGREGATOR_KEY; -use near_primitives::hash::CryptoHash; -use near_store::{checkpoint_hot_storage_and_cleanup_columns, DBCol, NodeStorage}; -use nearcore::{NightshadeRuntime, NightshadeRuntimeExt}; -use std::collections::{HashMap, HashSet}; -use std::path::{Path, PathBuf}; - -#[derive(clap::Parser)] -pub struct EpochSyncCommand { - #[clap(subcommand)] - subcmd: SubCommand, -} - -#[derive(clap::Parser)] -#[clap(subcommand_required = true, arg_required_else_help = true)] -enum SubCommand { - /// For every finished epoch construct `EpochSyncInfo` - /// and validate it the same way we would if we received it from a peer. - ValidateEpochSyncInfo(ValidateEpochSyncInfoCmd), -} - -impl EpochSyncCommand { - pub fn run(self, home_dir: &Path) -> anyhow::Result<()> { - let mut near_config = Self::create_snapshot(home_dir)?; - let storage = nearcore::open_storage(&home_dir, &mut near_config)?; - - match self.subcmd { - SubCommand::ValidateEpochSyncInfo(cmd) => cmd.run(&home_dir, &storage, &near_config), - } - } - - fn create_snapshot(home_dir: &Path) -> anyhow::Result { - let mut near_config = nearcore::config::load_config( - &home_dir, - near_chain_configs::GenesisValidationMode::UnsafeFast, - ) - .unwrap_or_else(|e| panic!("Error loading config: {e:#}")); - - let store_path_addition = near_config - .config - .store - .path - .clone() - .unwrap_or_else(|| PathBuf::from("data")) - .join("epoch-sync-snapshot"); - let snapshot_path = home_dir.join(store_path_addition.clone()); - - let storage = nearcore::open_storage(&home_dir, &mut near_config)?; - - if snapshot_path.exists() && snapshot_path.is_dir() { - tracing::info!(?snapshot_path, "Found a DB snapshot"); - } else { - tracing::info!(destination = ?snapshot_path, "Creating snapshot of original DB"); - // checkpointing only hot storage, because cold storage will not be changed - checkpoint_hot_storage_and_cleanup_columns( - &storage.get_hot_store(), - &snapshot_path, - None, - )?; - } - - near_config.config.store.path = Some(store_path_addition.join("data")); - - Ok(near_config) - } -} - -#[derive(clap::Parser)] -struct ValidateEpochSyncInfoCmd { - /// If `archive` flag is specified, `BlockInfo` column is assumed to be full and is used for optimisation purposes. 
- /// Using `BlockInfo` (`--archive` flag) takes around 10 minutes. - /// Using `BlockHeader` takes around 1.5 hours. - #[clap(short, long)] - archive: bool, -} - -impl ValidateEpochSyncInfoCmd { - pub fn run( - &self, - home_dir: &Path, - storage: &NodeStorage, - config: &nearcore::config::NearConfig, - ) -> anyhow::Result<()> { - let store = storage.get_hot_store(); - - let hash_to_prev_hash = if self.archive { - get_hash_to_prev_hash_from_block_info(storage)? - } else { - get_hash_to_prev_hash_from_block_header(storage)? - }; - let epoch_ids = get_all_epoch_ids(storage)?; - - let mut chain_store = - ChainStore::new(store.clone(), config.genesis.config.genesis_height, false); - let header_head_hash = chain_store.header_head()?.last_block_hash; - let hash_to_next_hash = get_hash_to_next_hash(&hash_to_prev_hash, &header_head_hash)?; - - let epoch_manager = - EpochManager::new_arc_handle(storage.get_hot_store(), &config.genesis.config); - let runtime = NightshadeRuntime::from_config( - home_dir, - storage.get_hot_store(), - &config, - epoch_manager.clone(), - ) - .context("could not create the transaction runtime")?; - let chain_update = ChainUpdate::new( - &mut chain_store, - epoch_manager, - runtime, - DoomslugThresholdMode::TwoThirds, - config.genesis.config.transaction_validity_period, - ); - - let genesis_hash = store - .get_ser::( - DBCol::BlockHeight, - &config.genesis.config.genesis_height.to_le_bytes(), - )? - .expect("Expect genesis height to be present in BlockHeight column"); - - let mut cur_hash = header_head_hash; - - // Edge case if we exactly at the epoch boundary. - // In this case we cannot create `EpochSyncInfo` for this epoch yet, - // as there is no block header with `epoch_sync_data_hash` for that epoch. - if epoch_ids.contains(&cur_hash) { - cur_hash = hash_to_prev_hash[&cur_hash]; - } - - let mut num_errors = 0; - - while cur_hash != genesis_hash { - tracing::debug!(?cur_hash, "Big loop hash"); - - // epoch ids are the last hashes of some epochs - if epoch_ids.contains(&cur_hash) { - let last_header = store - .get_ser::(DBCol::BlockHeader, cur_hash.as_ref())? - .context("BlockHeader for cur_hash not found")?; - let last_finalized_height = - if *last_header.last_final_block() == CryptoHash::default() { - 0 - } else { - let last_finalized_header = store - .get_ser::( - DBCol::BlockHeader, - last_header.last_final_block().as_ref(), - )? 
- .context("BlockHeader for cur_hash.last_final_block not found")?; - last_finalized_header.height() - }; - - loop { - let prev_hash = hash_to_prev_hash[&cur_hash]; - if epoch_ids.contains(&prev_hash) { - // prev_hash is the end of previous epoch - // cur_hash is the start of current epoch - break; - } else { - // prev_hash is still in the current epoch - // we descent to it - cur_hash = prev_hash; - } - } - - let first_block_hash = cur_hash; - - let mut last_block_info = BlockInfo::new( - *last_header.hash(), - last_header.height(), - last_finalized_height, - *last_header.last_final_block(), - *last_header.prev_hash(), - last_header.prev_validator_proposals().collect(), - last_header.chunk_mask().to_vec(), - vec![], - last_header.total_supply(), - last_header.latest_protocol_version(), - last_header.raw_timestamp(), - ); - - *last_block_info.epoch_id_mut() = *last_header.epoch_id(); - *last_block_info.epoch_first_block_mut() = first_block_hash; - - let next_epoch_first_hash = hash_to_next_hash[last_header.hash()]; - tracing::debug!("Creating EpochSyncInfo from block {:?}", last_header); - - let epoch_sync_info = chain_update.create_epoch_sync_info( - &last_block_info, - &next_epoch_first_hash, - Some(&hash_to_prev_hash), - )?; - - let calculated_epoch_sync_data_hash_result = - epoch_sync_info.calculate_epoch_sync_data_hash(); - let canonical_epoch_sync_data_hash_result = - epoch_sync_info.get_epoch_sync_data_hash(); - - if let (Ok(calculated), Ok(Some(canonical))) = ( - &calculated_epoch_sync_data_hash_result, - &canonical_epoch_sync_data_hash_result, - ) { - if calculated == canonical { - tracing::info!( - "EpochSyncInfo for height {:?} OK", - epoch_sync_info.epoch_info.epoch_height() - ); - continue; - } - } - tracing::error!( - "EpochSyncInfo for height {:?} ERROR {:?} {:?}", - epoch_sync_info.epoch_info.epoch_height(), - calculated_epoch_sync_data_hash_result, - canonical_epoch_sync_data_hash_result - ); - num_errors += 1; - } else { - cur_hash = hash_to_prev_hash[&cur_hash]; - } - } - assert_eq!(num_errors, 0); - Ok(()) - } -} - -/// Creates mapping from `cur_hash` to `prev_hash` by iterating through `BlockInfo` column. -/// Mapping from `cur_hash` to `prev_hash` is unique, as there are no two blocks with the same hash. -/// This means there will not be key collision. Value collision may happen, but it is irrelevant for `HashMap`. -fn get_hash_to_prev_hash_from_block_info( - storage: &NodeStorage, -) -> anyhow::Result> { - let mut hash_to_prev_hash = HashMap::new(); - let store = storage.get_split_store().unwrap_or_else(|| storage.get_hot_store()); - for result in store.iter(DBCol::BlockInfo) { - let (_, value) = result?; - let block_info = - BlockInfo::try_from_slice(value.as_ref()).expect("Failed to deser BlockInfo"); - if block_info.hash() != block_info.prev_hash() { - hash_to_prev_hash.insert(*block_info.hash(), *block_info.prev_hash()); - } - } - Ok(hash_to_prev_hash) -} - -/// Creates mapping from `cur_hash` to `prev_hash` by iterating through `BlockHeader` column. -/// Mapping from `cur_hash` to `prev_hash` is unique, as there are no two blocks with the same hash. -/// This means there will not be key collision. Value collision may happen, but it is irrelevant for `HashMap`. 
-fn get_hash_to_prev_hash_from_block_header( - storage: &NodeStorage, -) -> anyhow::Result> { - let mut hash_to_prev_hash = HashMap::new(); - for result in storage.get_hot_store().iter(DBCol::BlockHeader) { - let (_, value) = result?; - let block_header = - BlockHeader::try_from_slice(value.as_ref()).expect("Failed to deser BlockHeader"); - if block_header.hash() != block_header.prev_hash() { - hash_to_prev_hash.insert(*block_header.hash(), *block_header.prev_hash()); - } - } - Ok(hash_to_prev_hash) -} - -/// Creates mapping from `cur_hash` to `next_hash` for the chain ending in `chain_head` -/// by descending through mapping from `cur_hash` to `prev_hash`. -/// Only builds mapping for one chain to avoid key collision due to forks. -fn get_hash_to_next_hash( - hash_to_prev_hash: &HashMap, - chain_head: &CryptoHash, -) -> anyhow::Result> { - let mut hash_to_next_hash = HashMap::new(); - let mut cur_head = *chain_head; - while let Some(prev_hash) = hash_to_prev_hash.get(&cur_head) { - hash_to_next_hash.insert(*prev_hash, cur_head); - cur_head = *prev_hash; - } - Ok(hash_to_next_hash) -} - -/// Get all `EpochId`s by iterating `EpochInfo` column and return them as `HashSet`. -/// This function is used to get hashes of all last epoch blocks as `EpochId` represents last hash of prev prev column. -fn get_all_epoch_ids(storage: &NodeStorage) -> anyhow::Result> { - let mut epoch_ids = HashSet::new(); - for result in storage.get_hot_store().iter(DBCol::EpochInfo) { - let (key, _) = result?; - if key.as_ref() == AGGREGATOR_KEY { - continue; - } - epoch_ids - .insert(CryptoHash::try_from_slice(key.as_ref()).expect("Failed to deser CryptoHash")); - } - Ok(epoch_ids) -} diff --git a/tools/epoch-sync/src/lib.rs b/tools/epoch-sync/src/lib.rs deleted file mode 100644 index 28db34fd113..00000000000 --- a/tools/epoch-sync/src/lib.rs +++ /dev/null @@ -1,4 +0,0 @@ -#[cfg(feature = "new_epoch_sync")] -pub mod cli; -#[cfg(feature = "new_epoch_sync")] -pub use cli::EpochSyncCommand;
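A note on the chain-walk helpers deleted above: the `get_hash_to_prev_hash_*` functions built a child-to-parent map (keys are unique because block hashes are unique), and `get_hash_to_next_hash` inverted it only along the single chain ending at the chosen head, so forked siblings of the same parent could never collide in the parent-to-child direction. Below is a std-only sketch of that inversion; it is illustrative only and uses string labels as stand-ins for `CryptoHash`.

use std::collections::HashMap;

/// Invert a child -> parent map along the single chain ending at `chain_head`,
/// mirroring the removed get_hash_to_next_hash helper. Only the chain that ends
/// at `chain_head` is walked, so fork blocks never overwrite canonical entries.
fn hash_to_next_hash<'a>(
    hash_to_prev_hash: &HashMap<&'a str, &'a str>,
    chain_head: &'a str,
) -> HashMap<&'a str, &'a str> {
    let mut next = HashMap::new();
    let mut cur = chain_head;
    while let Some(&prev) = hash_to_prev_hash.get(cur) {
        next.insert(prev, cur);
        cur = prev;
    }
    next
}

fn main() {
    // genesis <- a <- b <- head, plus a fork child `b_fork` of `a`.
    let prev: HashMap<&str, &str> =
        [("a", "genesis"), ("b", "a"), ("b_fork", "a"), ("head", "b")].into_iter().collect();
    let next = hash_to_next_hash(&prev, "head");
    assert_eq!(next.get("genesis"), Some(&"a"));
    assert_eq!(next.get("a"), Some(&"b")); // the fork block never overwrites this entry
    assert_eq!(next.len(), 3); // "b_fork" is not reachable from "head" and is skipped
    println!("{next:?}");
}

Walking backwards from the head gives each parent on that chain exactly one successor entry, which is what the removed validate-epoch-sync-info command relied on when descending from the header head toward genesis.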