From eee1b05ce8024a4834b3416c85ea567f394e75af Mon Sep 17 00:00:00 2001 From: Roshan <48975233+pythonberg1997@users.noreply.github.com> Date: Mon, 22 Jul 2024 16:53:00 +0800 Subject: [PATCH 1/7] fix: read sidecars from table in `get_take_block_range` (#79) * fix: read sidecars from table in `get_take_block_range` * fix lint issue --- .../provider/src/providers/database/provider.rs | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 1f6c757c4a..75354890a4 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -969,6 +969,7 @@ impl DatabaseProvider { let block_withdrawals = self.get_or_take::(range.clone())?; let block_requests = self.get_or_take::(range.clone())?; + let block_sidecars = self.get_or_take::(range.clone())?; let block_tx = self.get_take_block_transaction_range::(range.clone())?; @@ -993,9 +994,11 @@ impl DatabaseProvider { let mut block_ommers_iter = block_ommers.into_iter(); let mut block_withdrawals_iter = block_withdrawals.into_iter(); let mut block_requests_iter = block_requests.into_iter(); + let mut block_sidecars_iter = block_sidecars.into_iter(); let mut block_ommers = block_ommers_iter.next(); let mut block_withdrawals = block_withdrawals_iter.next(); let mut block_requests = block_requests_iter.next(); + let mut block_sidecars = block_sidecars_iter.next(); let mut blocks = Vec::new(); for ((main_block_number, header), (_, header_hash), (_, tx)) in @@ -1044,10 +1047,17 @@ impl DatabaseProvider { } // sidecars can be missing - let sidecars = if self.chain_spec.is_cancun_active_at_timestamp(header.timestamp) { - self.static_file_provider.sidecars(&header.hash())? 
+ let cancun_is_active = self.chain_spec.is_cancun_active_at_timestamp(header.timestamp); + let mut sidecars = Some(BlobSidecars::default()); + if cancun_is_active { + if let Some((block_number, _)) = block_sidecars.as_ref() { + if *block_number == main_block_number { + sidecars = Some(block_sidecars.take().unwrap().1); + block_sidecars = block_sidecars_iter.next(); + } + } } else { - None + sidecars = None; }; blocks.push(SealedBlockWithSenders { From 3d8df4a01b0d1a34056ba3838c03c01a092a6a75 Mon Sep 17 00:00:00 2001 From: dylanhuang Date: Wed, 24 Jul 2024 13:07:27 +0800 Subject: [PATCH 2/7] fix: check parent hash of disconnected headers (#81) --- crates/bsc/engine/src/task.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/crates/bsc/engine/src/task.rs b/crates/bsc/engine/src/task.rs index e2bffe8832..280a289125 100644 --- a/crates/bsc/engine/src/task.rs +++ b/crates/bsc/engine/src/task.rs @@ -310,6 +310,13 @@ impl< } disconnected_headers.push(sealed_header.clone()); } + + // check that the last header's parent_hash matches the trusted header + if !disconnected_headers.is_empty() && + disconnected_headers.last().unwrap().parent_hash != trusted_header.hash() + { + continue; + } }; // cache header and block From 996bc4164b6dff30760fe36a3ff0f4dd810cb6f6 Mon Sep 17 00:00:00 2001 From: dylanhuang Date: Mon, 29 Jul 2024 10:17:15 +0800 Subject: [PATCH 3/7] fix: parlia live sync issue (#83) * fix: check hash value of sealed headers to prevent incomplete headers * fix: check_consistency for sidecar * fix: fix disconnected header check * fix: print the mgas usage log for InsertDownloadedPayload action * fix: order of publishing fcu * fix: use MIN_BLOCKS_FOR_PIPELINE_RUN instead of EPOCH_SLOTS * Revert "fix: check_consistency for sidecar" This reverts commit 19e017df7d52885aad79947a0634ee93c6356cbc.
* fix: change fetch header priority to normal * fix: clean the cache if downloader report the header/body is invalid * test: add ut for storage cache clean --- crates/bsc/engine/src/client.rs | 25 ++++++++-- crates/bsc/engine/src/lib.rs | 49 ++++++++++++++++++ crates/bsc/engine/src/task.rs | 60 +++++++++++++++-------- crates/consensus/beacon/src/engine/mod.rs | 11 ++++- 4 files changed, 118 insertions(+), 27 deletions(-) diff --git a/crates/bsc/engine/src/client.rs b/crates/bsc/engine/src/client.rs index 27643fb253..0c2f35a23e 100644 --- a/crates/bsc/engine/src/client.rs +++ b/crates/bsc/engine/src/client.rs @@ -29,11 +29,13 @@ pub struct ParliaClient { /// cached header and body storage: Storage, fetch_client: FetchClient, + peer_id: PeerId, } impl ParliaClient { - pub(crate) const fn new(storage: Storage, fetch_client: FetchClient) -> Self { - Self { storage, fetch_client } + pub(crate) fn new(storage: Storage, fetch_client: FetchClient) -> Self { + let peer_id = PeerId::random(); + Self { storage, fetch_client, peer_id } } async fn fetch_headers(&self, request: HeadersRequest) -> InnerFetchHeaderResult { @@ -87,6 +89,11 @@ impl ParliaClient { Ok(bodies) } + + async fn clean_cache(&self) { + let mut storage = self.storage.write().await; + storage.clean_caches() + } } impl HeadersClient for ParliaClient { @@ -98,10 +105,11 @@ impl HeadersClient for ParliaClient { priority: Priority, ) -> Self::Output { let this = self.clone(); + let peer_id = self.peer_id; Box::pin(async move { let result = this.fetch_headers(request.clone()).await; if let Ok(headers) = result { - return Ok(WithPeerId::new(PeerId::random(), headers)); + return Ok(WithPeerId::new(peer_id, headers)); } this.fetch_client.get_headers_with_priority(request.clone(), priority).await }) @@ -117,10 +125,11 @@ impl BodiesClient for ParliaClient { priority: Priority, ) -> Self::Output { let this = self.clone(); + let peer_id = self.peer_id; Box::pin(async move { let result = this.fetch_bodies(hashes.clone()).await; if let Ok(blocks) = result { - return Ok(WithPeerId::new(PeerId::random(), blocks)); + return Ok(WithPeerId::new(peer_id, blocks)); } this.fetch_client.get_block_bodies_with_priority(hashes.clone(), priority).await }) @@ -130,7 +139,13 @@ impl BodiesClient for ParliaClient { impl DownloadClient for ParliaClient { fn report_bad_message(&self, peer_id: PeerId) { let this = self.clone(); - this.fetch_client.report_bad_message(peer_id) + if peer_id == self.peer_id { + tokio::spawn(async move { + this.clean_cache().await; + }); + } else { + this.fetch_client.report_bad_message(peer_id) + } } fn num_connected_peers(&self) -> usize { diff --git a/crates/bsc/engine/src/lib.rs b/crates/bsc/engine/src/lib.rs index 37dcfb75c8..a34e74768f 100644 --- a/crates/bsc/engine/src/lib.rs +++ b/crates/bsc/engine/src/lib.rs @@ -219,6 +219,13 @@ impl StorageInner { self.best_finalized_hash = finalized; self.best_safe_hash = safe; } + + /// Cleans the caches + pub(crate) fn clean_caches(&mut self) { + self.headers = LimitedHashSet::new(STORAGE_CACHE_NUM); + self.hash_to_number = LimitedHashSet::new(STORAGE_CACHE_NUM); + self.bodies = LimitedHashSet::new(STORAGE_CACHE_NUM); + } } #[derive(Debug)] @@ -322,4 +329,46 @@ mod tests { assert_eq!(set.get(&2), Some(&2)); assert_eq!(set.get(&3), Some(&3)); } + + #[test] + fn test_clean_cache() { + let default_block = Header::default().seal_slow(); + let mut storage = StorageInner { + best_hash: default_block.hash(), + best_block: default_block.number, + best_header: default_block.clone(), + headers: 
LimitedHashSet::new(10), + hash_to_number: LimitedHashSet::new(10), + bodies: LimitedHashSet::new(10), + best_finalized_hash: B256::default(), + best_safe_hash: B256::default(), + }; + storage.headers.put(default_block.number, default_block.clone()); + storage.hash_to_number.put(default_block.hash(), default_block.number); + + let block = Header::default().seal_slow(); + storage.insert_new_block(block.clone(), BlockBody::default()); + assert_eq!(storage.best_block, block.number); + assert_eq!(storage.best_hash, block.hash()); + assert_eq!(storage.best_header, block); + assert_eq!(storage.headers.get(&block.number), Some(&block)); + assert_eq!(storage.hash_to_number.get(&block.hash()), Some(&block.number)); + assert_eq!(storage.bodies.get(&block.hash()), Some(&BlockBody::default())); + assert_eq!( + storage.header_by_hash_or_number(BlockHashOrNumber::Hash(block.hash())), + Some(block.clone()) + ); + assert_eq!( + storage.header_by_hash_or_number(BlockHashOrNumber::Number(block.number)), + Some(block.clone()) + ); + assert_eq!(storage.best_block, block.number); + assert_eq!(storage.best_hash, block.hash()); + assert_eq!(storage.best_header, block); + + storage.clean_caches(); + assert_eq!(storage.headers.get(&block.number), None); + assert_eq!(storage.hash_to_number.get(&block.hash()), None); + assert_eq!(storage.bodies.get(&block.hash()), None); + } } diff --git a/crates/bsc/engine/src/task.rs b/crates/bsc/engine/src/task.rs index 280a289125..73689f7cf6 100644 --- a/crates/bsc/engine/src/task.rs +++ b/crates/bsc/engine/src/task.rs @@ -1,5 +1,5 @@ use crate::{client::ParliaClient, Storage}; -use reth_beacon_consensus::{BeaconEngineMessage, ForkchoiceStatus}; +use reth_beacon_consensus::{BeaconEngineMessage, ForkchoiceStatus, MIN_BLOCKS_FOR_PIPELINE_RUN}; use reth_bsc_consensus::Parlia; use reth_chainspec::ChainSpec; use reth_engine_primitives::EngineTypes; @@ -10,7 +10,6 @@ use reth_network_p2p::{ priority::Priority, }; use reth_primitives::{Block, BlockBody, BlockHashOrNumber, SealedHeader, B256}; -use reth_primitives_traits::constants::EPOCH_SLOTS; use reth_provider::{BlockReaderIdExt, CanonChainTracker, ParliaProvider}; use reth_rpc_types::engine::ForkchoiceState; use std::{ @@ -210,7 +209,7 @@ impl< // fetch header and verify let fetch_header_result = match timeout( fetch_header_timeout_duration, - block_fetcher.get_header_with_priority(info.block_hash, Priority::High), + block_fetcher.get_header_with_priority(info.block_hash, Priority::Normal), ) .await { @@ -267,10 +266,25 @@ impl< if !is_valid_header { continue }; + // check that the computed header hash matches the announced block hash; + // if it does not, the block has probably not been sealed yet + let block_hash = match info.block_hash { + BlockHashOrNumber::Hash(hash) => hash, + BlockHashOrNumber::Number(number) => { + // triggered by the interval tick, so we can only trust the number + if number != sealed_header.number { + continue; + } + sealed_header.hash() + } + }; + if sealed_header.hash() != block_hash { + continue; + } let mut disconnected_headers = Vec::new(); - disconnected_headers.push(sealed_header.clone()); - let pipeline_sync = (trusted_header.number + EPOCH_SLOTS) < sealed_header.number; + let pipeline_sync = + (trusted_header.number + MIN_BLOCKS_FOR_PIPELINE_RUN) < sealed_header.number; if !pipeline_sync && (sealed_header.number - 1) > trusted_header.number { let fetch_headers_result = match timeout( fetch_header_timeout_duration, @@ -294,23 +308,25 @@ } let headers = fetch_headers_result.unwrap().into_data(); - for header in headers { - let
sealed_header = header.clone().seal_slow(); - let predicted_timestamp = trusted_header.timestamp + - block_interval * (sealed_header.number - 1 - trusted_header.number); - if consensus - .validate_header_with_predicted_timestamp( - &sealed_header, - predicted_timestamp, - ) - .is_err() - { - trace!(target: "consensus::parlia", "Invalid header"); - continue + if headers.is_empty() { + continue + } + let mut parent_hash = sealed_header.parent_hash; + for (i, _) in headers.iter().enumerate() { + let sealed_header = headers[i].clone().seal_slow(); + if sealed_header.hash() != parent_hash { + break; } + parent_hash = sealed_header.parent_hash; disconnected_headers.push(sealed_header.clone()); } + // check that the number of disconnected headers matches the number of fetched headers; + // if not, the headers are not valid + if disconnected_headers.len() != headers.len() { + continue; + } + // check that the last header's parent_hash matches the trusted header if !disconnected_headers.is_empty() && disconnected_headers.last().unwrap().parent_hash != trusted_header.hash() @@ -319,6 +335,8 @@ { continue; } }; + disconnected_headers.insert(0, sealed_header.clone()); + disconnected_headers.reverse(); // cache header and block let mut storage = storage.write().await; if info.block.is_some() { @@ -327,7 +345,6 @@ BlockBody::from(info.block.clone().unwrap()), ); } - for header in disconnected_headers { storage.insert_new_header(header.clone()); let result = @@ -340,9 +357,11 @@ trusted_header: trusted_header.clone(), })); if result.is_err() { - error!(target: "consensus::parlia", "Failed to send new block event to fork choice"); + error!(target: "consensus::parlia", "Failed to send new block event to + fork choice"); } } + drop(storage); let result = chain_tracker_tx.send(ForkChoiceMessage::NewHeader(NewHeaderEvent { header: sealed_header.clone(), pipeline_sync, @@ -352,7 +371,6 @@ if result.is_err() { error!(target: "consensus::parlia", "Failed to send new block event to chain tracker"); } - drop(storage); } }); info!(target: "consensus::parlia", "started listening to network block event") diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 9673f6205d..08cf1f2e05 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -1726,13 +1726,22 @@ where BlockchainTreeAction::InsertDownloadedPayload { block } => { let downloaded_num_hash = block.num_hash(); + let start = Instant::now(); match self.blockchain.insert_block_without_senders( - block, + block.clone(), BlockValidationKind::SkipStateRootValidation, ) { Ok(status) => { match status { InsertPayloadOk::Inserted(BlockStatus::Valid(_)) => { + let elapsed = start.elapsed(); + let event_block = Arc::new(block); + let event = BeaconConsensusEngineEvent::CanonicalBlockAdded( + event_block, + elapsed, + ); + self.event_sender.notify(event); + // block is connected to the canonical chain and is valid. // if it's not connected to current canonical head, the state root // has not been validated.
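The core of the live sync fix above (#83) is worth isolating: a batch of fetched headers is accepted only if it hash-links, child to parent, from the announced head all the way back to the locally trusted header. Below is a minimal, self-contained sketch of that linkage check, using a simplified hypothetical `Header` type in place of reth's `SealedHeader` (whose hashes come from `seal_slow()`); it is an illustration of the technique, not the patch's code.

```rust
/// Simplified stand-in for reth's `SealedHeader` (hypothetical).
#[derive(Clone)]
struct Header {
    number: u64,
    hash: [u8; 32],
    parent_hash: [u8; 32],
}

/// Accepts a fetched batch ordered child -> parent, starting at the announced
/// head's parent, and returns it oldest-first only if every header hash-links
/// and the oldest one attaches to the locally trusted header.
fn link_disconnected_headers(
    head: &Header,
    trusted: &Header,
    fetched: &[Header],
) -> Option<Vec<Header>> {
    let mut chain = Vec::with_capacity(fetched.len());
    let mut expected = head.parent_hash;
    for header in fetched {
        if header.hash != expected {
            return None; // broken link: the batch is not a contiguous ancestry
        }
        expected = header.parent_hash;
        chain.push(header.clone());
    }
    // the oldest fetched header must attach to the trusted header
    if chain.last().map(|h| h.parent_hash) != Some(trusted.hash) {
        return None;
    }
    chain.reverse(); // oldest -> newest, the order used for insertion
    Some(chain)
}

fn main() {
    let trusted = Header { number: 100, hash: [1; 32], parent_hash: [0; 32] };
    let mid = Header { number: 101, hash: [2; 32], parent_hash: [1; 32] };
    let head = Header { number: 102, hash: [3; 32], parent_hash: [2; 32] };
    let chain = link_disconnected_headers(&head, &trusted, &[mid]).expect("contiguous ancestry");
    println!("verified {} ancestor(s) of block #{}", chain.len(), head.number);
}
```

The patch expresses the same idea by keeping the linked prefix and then rejecting the batch when its length differs from what was fetched; the sketch folds both checks into a single early return.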
From 0289b941b47378f8489961055890ed25ad2c9360 Mon Sep 17 00:00:00 2001 From: forcodedancing Date: Tue, 30 Jul 2024 09:39:55 +0800 Subject: [PATCH 4/7] chore: refine readme file (#87) --- README.md | 104 ++++++++++++++++++++++++------------------------ 1 file changed, 47 insertions(+), 57 deletions(-) diff --git a/README.md b/README.md index d5f87d9279..467062cb30 100644 --- a/README.md +++ b/README.md @@ -53,26 +53,29 @@ make install-op ## Run Reth for BSC +### Hardware Requirements + +* CPU with 16+ cores +* 128GB RAM +* High-performance NVMe SSD with at least 4TB of free space for full node and 8TB of free space for archive node +* A broadband internet connection with upload/download speeds of 25 MB/s + +### Steps to Run bsc-reth + The command below is for an archive node. To run a full node, simply add the `--full` tag. ```shell -# for testnet -export network=bsc-testnet - # for mainnet -# export network=bsc +export network=bsc + +# for testnet +# export network=bsc-testnet ./target/release/bsc-reth node \ --datadir=./datadir \ --chain=${network} \ --http \ - --http.addr=0.0.0.0 \ - --http.port=8545 \ --http.api="eth, net, txpool, web3, rpc" \ - --ws \ - --ws.addr=0.0.0.0 \ - --ws.port=8546 \ - --nat=any \ --log.file.directory ./datadir/logs ``` You can run `bsc-reth --help` for command explanations. For running bsc-reth with docker, please use the following command: ```shell -# for testnet -export network=bsc-testnet - # for mainnet -# export network=bsc +export network=bsc + +# for testnet +# export network=bsc-testnet # check this for version of the docker image, https://github.com/bnb-chain/reth/pkgs/container/bsc-reth export version=latest # the directory where reth data will be stored export data_dir=/xxx/xxx docker run -d -p 8545:8545 -p 30303:30303 -p 30303:30303/udp -v ${data_dir}:/data \ --name bsc-reth ghcr.io/bnb-chain/bsc-reth:${version} node \ --datadir=/data \ --chain=${network} \ --http \ - --http.addr=0.0.0.0 \ - --http.port=8545 \ --http.api="eth, net, txpool, web3, rpc" \ - --ws \ - --ws.addr=0.0.0.0 \ - --ws.port=8546 \ - --nat=any \ --log.file.directory /data/logs ``` +### Snapshots + +Snapshots are available from the community; you can use one to reduce the sync time for catching up. + +* [fuzzland snapshot](https://github.com/fuzzland/snapshots) + ## Run Reth for opBNB -The op-reth can function as both a full node and an archive node. Due to its unique storage advantages, it is primarily utilized for running archive nodes. +The op-reth can function as both a full node and an archive node. Due to its unique storage advantages, it is primarily +utilized for running archive nodes.
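Once either bsc-reth above or op-reth below is serving HTTP-RPC, you can sanity-check the endpoint with a raw `eth_blockNumber` request. Here is a minimal, dependency-free Rust sketch; the address is an assumption based on reth's defaults for `--http.addr` and `--http.port` (127.0.0.1:8545), so adjust it if you changed those flags:

```rust
use std::io::{Read, Write};
use std::net::TcpStream;

fn main() -> std::io::Result<()> {
    // JSON-RPC 2.0 request for the latest block number
    let body = r#"{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}"#;
    // assumed endpoint: reth's default --http.addr / --http.port
    let addr = "127.0.0.1:8545";
    let request = format!(
        "POST / HTTP/1.1\r\nHost: {addr}\r\nContent-Type: application/json\r\nContent-Length: {}\r\nConnection: close\r\n\r\n{body}",
        body.len()
    );
    let mut stream = TcpStream::connect(addr)?;
    stream.write_all(request.as_bytes())?;
    let mut response = String::new();
    stream.read_to_string(&mut response)?;
    // expect an HTTP 200 with a JSON body like {"jsonrpc":"2.0","id":1,"result":"0x..."}
    println!("{response}");
    Ok(())
}
```

A plain `curl` against the same endpoint works equally well; the point is only that a healthy node answers with a hex-encoded block number that advances as it syncs.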
### Hardware Requirements @@ -133,16 +137,16 @@ git clone https://github.com/bnb-chain/opbnb cd opbnb make op-node +# for mainnet +export network=mainnet +export L1_RPC=https://bsc-dataseed.bnbchain.org +export P2P_BOOTNODES="enr:-J24QA9sgVxbZ0KoJ7-1gx_szfc7Oexzz7xL2iHS7VMHGj2QQaLc_IQZmFthywENgJWXbApj7tw7BiouKDOZD4noWEWGAYppffmvgmlkgnY0gmlwhDbjSM6Hb3BzdGFja4PMAQCJc2VjcDI1NmsxoQKetGQX7sXd4u8hZr6uayTZgHRDvGm36YaryqZkgnidS4N0Y3CCIyuDdWRwgiMs,enr:-J24QPSZMaGw3NhO6Ll25cawknKcOFLPjUnpy72HCkwqaHBKaaR9ylr-ejx20INZ69BLLj334aEqjNHKJeWhiAdVcn-GAYv28FmZgmlkgnY0gmlwhDTDWQOHb3BzdGFja4PMAQCJc2VjcDI1NmsxoQJ-_5GZKjs7jaB4TILdgC8EwnwyL3Qip89wmjnyjvDDwoN0Y3CCIyuDdWRwgiMs" + # for testnet # it's better to replace the L1_RPC with your own BSC Testnet RPC Endpoint for stability -export network=testnet -export L1_RPC=https://bsc-testnet.bnbchain.org -export P2P_BOOTNODES="enr:-J24QGQBeMsXOaCCaLWtNFSfb2Gv50DjGOKToH2HUTAIn9yXImowlRoMDNuPNhSBZNQGCCE8eAl5O3dsONuuQp5Qix2GAYjB7KHSgmlkgnY0gmlwhDREiqaHb3BzdGFja4PrKwCJc2VjcDI1NmsxoQL4I9wpEVDcUb8bLWu6V8iPoN5w8E8q-GrS5WUCygYUQ4N0Y3CCIyuDdWRwgiMr,enr:-J24QJKXHEkIhy0tmIk2EscMZ2aRrivNsZf_YhgIU51g4ZKHWY0BxW6VedRJ1jxmneW9v7JjldPOPpLkaNSo6cXGFxqGAYpK96oCgmlkgnY0gmlwhANzx96Hb3BzdGFja4PrKwCJc2VjcDI1NmsxoQMOCzUFffz04eyDrmkbaSCrMEvLvn5O4RZaZ5k1GV4wa4N0Y3CCIyuDdWRwgiMr" - -# for mainnet -# export network=mainnet -# export L1_RPC=https://bsc-dataseed.bnbchain.org -# export P2P_BOOTNODES="enr:-J24QA9sgVxbZ0KoJ7-1gx_szfc7Oexzz7xL2iHS7VMHGj2QQaLc_IQZmFthywENgJWXbApj7tw7BiouKDOZD4noWEWGAYppffmvgmlkgnY0gmlwhDbjSM6Hb3BzdGFja4PMAQCJc2VjcDI1NmsxoQKetGQX7sXd4u8hZr6uayTZgHRDvGm36YaryqZkgnidS4N0Y3CCIyuDdWRwgiMs,enr:-J24QPSZMaGw3NhO6Ll25cawknKcOFLPjUnpy72HCkwqaHBKaaR9ylr-ejx20INZ69BLLj334aEqjNHKJeWhiAdVcn-GAYv28FmZgmlkgnY0gmlwhDTDWQOHb3BzdGFja4PMAQCJc2VjcDI1NmsxoQJ-_5GZKjs7jaB4TILdgC8EwnwyL3Qip89wmjnyjvDDwoN0Y3CCIyuDdWRwgiMs" +# export network=testnet +# export L1_RPC=https://bsc-testnet.bnbchain.org +# export P2P_BOOTNODES="enr:-J24QGQBeMsXOaCCaLWtNFSfb2Gv50DjGOKToH2HUTAIn9yXImowlRoMDNuPNhSBZNQGCCE8eAl5O3dsONuuQp5Qix2GAYjB7KHSgmlkgnY0gmlwhDREiqaHb3BzdGFja4PrKwCJc2VjcDI1NmsxoQL4I9wpEVDcUb8bLWu6V8iPoN5w8E8q-GrS5WUCygYUQ4N0Y3CCIyuDdWRwgiMr,enr:-J24QJKXHEkIhy0tmIk2EscMZ2aRrivNsZf_YhgIU51g4ZKHWY0BxW6VedRJ1jxmneW9v7JjldPOPpLkaNSo6cXGFxqGAYpK96oCgmlkgnY0gmlwhANzx96Hb3BzdGFja4PrKwCJc2VjcDI1NmsxoQMOCzUFffz04eyDrmkbaSCrMEvLvn5O4RZaZ5k1GV4wa4N0Y3CCIyuDdWRwgiMr" ./op-node/bin/op-node \ --l1.trustrpc \ @@ -175,13 +179,13 @@ op-reth. The command below is for an archive node. To run a full node, simply add the `--full` tag. 
```shell -# for testnet -export network=testnet -export L2_RPC=https://opbnb-testnet-rpc.bnbchain.org - # for mainnet -# export network=mainnet -# export L2_RPC=https://opbnb-mainnet-rpc.bnbchain.org +export network=mainnet +export L2_RPC=https://opbnb-mainnet-rpc.bnbchain.org + +# for testnet +# export network=testnet +# export L2_RPC=https://opbnb-testnet-rpc.bnbchain.org ./target/release/op-reth node \ --datadir=./datadir \ @@ -191,14 +195,7 @@ export L2_RPC=https://opbnb-testnet-rpc.bnbchain.org --authrpc.port=8551 \ --authrpc.jwtsecret=./jwt.txt \ --http \ - --http.addr=0.0.0.0 \ - --http.port=8545 \ --http.api="eth, net, txpool, web3, rpc" \ - --ws \ - --ws.addr=0.0.0.0 \ - --ws.port=8546 \ - --builder.gaslimit=150000000 \ - --nat=any \ --log.file.directory ./datadir/logs ``` @@ -208,13 +205,13 @@ found [here](https://docs.bnbchain.org/opbnb-docs/docs/tutorials/running-a-local For running op-reth with docker, please use the following command: ```shell -# for testnet -export network=testnet -export L2_RPC=https://opbnb-testnet-rpc.bnbchain.org - # for mainnet -# export network=mainnet -# export L2_RPC=https://opbnb-mainnet-rpc.bnbchain.org +export network=mainnet +export L2_RPC=https://opbnb-mainnet-rpc.bnbchain.org + +# for testnet +# export network=testnet +# export L2_RPC=https://opbnb-testnet-rpc.bnbchain.org # check this for version of the docker image, https://github.com/bnb-chain/reth/pkgs/container/op-reth export version=latest @@ -225,7 +222,7 @@ export data_dir=/xxx/xxx # the directory where the jwt.txt file is stored export jwt_dir=/xxx/xxx -docker run -d -p 8545:8545 -p 8546:8546 -p 30303:30303 -p 30303:30303/udp -v ${data_dir}:/data -v ${jwt_dir}:/jwt \ +docker run -d -p 8545:8545 -p 30303:30303 -p 30303:30303/udp -v ${data_dir}:/data -v ${jwt_dir}:/jwt \ --name op-reth ghcr.io/bnb-chain/op-reth:${version} node \ --datadir=/data \ --chain=opbnb-${network} \ @@ -234,14 +231,7 @@ docker run -d -p 8545:8545 -p 8546:8546 -p 30303:30303 -p 30303:30303/udp -v ${d --authrpc.port=8551 \ --authrpc.jwtsecret=/jwt/jwt.txt \ --http \ - --http.addr=0.0.0.0 \ - --http.port=8545 \ --http.api="eth, net, txpool, web3, rpc" \ - --ws \ - --ws.addr=0.0.0.0 \ - --ws.port=8546 \ - --builder.gaslimit=150000000 \ - --nat=any \ --log.file.directory /data/logs ``` From 2447722ddb0738ecbe4ea49f232fc3904cad00cf Mon Sep 17 00:00:00 2001 From: KeefeL <90749943+KeefeL@users.noreply.github.com> Date: Wed, 31 Jul 2024 09:32:05 +0800 Subject: [PATCH 5/7] doc: fix op-reth running tutorial (#90) Co-authored-by: Keefe Liu --- README.md | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 467062cb30..ed85880cfe 100644 --- a/README.md +++ b/README.md @@ -126,8 +126,7 @@ utilized for running archive nodes. ### Steps to Run op-reth The op-reth is an [execution client](https://ethereum.org/en/developers/docs/nodes-and-clients/#execution-clients) for -opBNB. -You need to run op-node along with op-reth to synchronize with the opBNB network. +opBNB. You need to run op-node along with op-reth to synchronize with the opBNB network. Here is the quick command for running the op-node. For more details, refer to the [opbnb repository](https://github.com/bnb-chain/opbnb). 
@@ -171,12 +170,16 @@ export P2P_BOOTNODES="enr:-J24QA9sgVxbZ0KoJ7-1gx_szfc7Oexzz7xL2iHS7VMHGj2QQaLc_I --rpc.enable-admin \ --l1=${L1_RPC} \ --l2=http://localhost:8551 \ - --l2.jwt-secret=./jwt.txt + --l2.jwt-secret=./jwt.txt \ + --syncmode=execution-layer ``` -Copy the JWT file generated when running the op-node to the current workspace. Here is a quick command for running -op-reth. -The command below is for an archive node. To run a full node, simply add the `--full` tag. +**It's important to mention that op-node and op-reth both need the same jwt.txt file.** +To ensure this, switch to the op-reth workdir and paste the jwt.txt file created during op-node execution into the current +workspace. + +Here is a quick command for running op-reth. The command below is for an archive node; to run a full node, simply add +the `--full` tag. ```shell # for mainnet From 3f00f1d6cde83f186a0a2ff97aac21c908587223 Mon Sep 17 00:00:00 2001 From: dylanhuang Date: Wed, 31 Jul 2024 15:08:40 +0800 Subject: [PATCH 6/7] fix: fork block handling in parlia engine and rewinding blocks to the block before the finalized block issue (#89) * fix: fork block handling in parlia engine * chore: add more debug logs * fix: getting finalized hash from snapshot * fix: enhance the fork block handling * fix: skip static file produce if finalized number > tip number * chore: fix lint --- crates/bsc/engine/src/lib.rs | 41 ++++++++++++++----- crates/bsc/engine/src/task.rs | 34 +++++++++------ .../beacon/src/engine/hooks/static_file.rs | 9 ++++ 3 files changed, 61 insertions(+), 23 deletions(-) diff --git a/crates/bsc/engine/src/lib.rs b/crates/bsc/engine/src/lib.rs index a34e74768f..52ac259603 100644 --- a/crates/bsc/engine/src/lib.rs +++ b/crates/bsc/engine/src/lib.rs @@ -46,7 +46,8 @@ pub struct ParliaEngineBuilder { network_block_event_rx: Arc>>, fetch_client: FetchClient, provider: Provider, - parlia_provider: P, + parlia: Parlia, + snapshot_reader: SnapshotReader

, } // === impl ParliaEngineBuilder === @@ -72,13 +73,26 @@ where .ok() .flatten() .unwrap_or_else(|| chain_spec.sealed_genesis_header()); + let parlia = Parlia::new(chain_spec.clone(), cfg.clone()); + + let mut finalized_hash = None; + let mut safe_hash = None; + let snapshot_reader = + SnapshotReader::new(Arc::new(parlia_provider), Arc::new(parlia.clone())); + let snapshot_result = snapshot_reader.snapshot(&latest_header, None); + if snapshot_result.is_ok() { + let snap = snapshot_result.unwrap(); + finalized_hash = Some(snap.vote_data.source_hash); + safe_hash = Some(snap.vote_data.target_hash); + } Self { chain_spec, cfg, provider, - parlia_provider, - storage: Storage::new(latest_header), + snapshot_reader, + parlia, + storage: Storage::new(latest_header, finalized_hash, safe_hash), to_engine, network_block_event_rx, fetch_client, @@ -96,16 +110,16 @@ where network_block_event_rx, fetch_client, provider, - parlia_provider, + parlia, + snapshot_reader, } = self; let parlia_client = ParliaClient::new(storage.clone(), fetch_client); - let parlia = Parlia::new(chain_spec.clone(), cfg.clone()); if start_engine_task { ParliaEngineTask::start( chain_spec, - parlia.clone(), + parlia, provider, - SnapshotReader::new(Arc::new(parlia_provider), Arc::new(parlia)), + snapshot_reader, to_engine, network_block_event_rx, storage, @@ -128,7 +142,14 @@ pub(crate) struct Storage { impl Storage { /// Initializes the [Storage] with the given best block. This should be initialized with the /// highest block in the chain, if there is a chain already stored on-disk. - fn new(best_block: SealedHeader) -> Self { + fn new( + best_block: SealedHeader, + finalized_hash: Option, + safe_hash: Option, + ) -> Self { + let best_finalized_hash = finalized_hash.unwrap_or_default(); + let best_safe_hash = safe_hash.unwrap_or_default(); + let mut storage = StorageInner { best_hash: best_block.hash(), best_block: best_block.number, @@ -136,8 +157,8 @@ impl Storage { headers: LimitedHashSet::new(STORAGE_CACHE_NUM), hash_to_number: LimitedHashSet::new(STORAGE_CACHE_NUM), bodies: LimitedHashSet::new(STORAGE_CACHE_NUM), - best_finalized_hash: B256::default(), - best_safe_hash: B256::default(), + best_finalized_hash, + best_safe_hash, }; storage.headers.put(best_block.number, best_block.clone()); storage.hash_to_number.put(best_block.hash(), best_block.number); diff --git a/crates/bsc/engine/src/task.rs b/crates/bsc/engine/src/task.rs index 73689f7cf6..3201c43bd2 100644 --- a/crates/bsc/engine/src/task.rs +++ b/crates/bsc/engine/src/task.rs @@ -20,6 +20,7 @@ use std::{ }; use tokio::sync::Mutex; +use reth_rpc_types::{BlockId, RpcBlockHash}; use tokio::{ signal, sync::{ @@ -40,7 +41,7 @@ enum ForkChoiceMessage { #[derive(Debug, Clone)] struct NewHeaderEvent { header: SealedHeader, - trusted_header: SealedHeader, + local_header: SealedHeader, pipeline_sync: bool, } @@ -147,6 +148,7 @@ impl< loop { let read_storage = storage.read().await; let best_header = read_storage.best_header.clone(); + let finalized_hash = read_storage.best_finalized_hash; drop(read_storage); let mut engine_rx_guard = engine_rx.lock().await; let mut info = BlockInfo { @@ -231,19 +233,25 @@ impl< } } let latest_header = header_option.unwrap(); - - // skip if parent hash is not equal to best hash - if latest_header.number == best_header.number + 1 && - latest_header.parent_hash != best_header.hash() - { - continue; - } - - let trusted_header = client + let finalized_header = client + .sealed_header_by_id(BlockId::Hash(RpcBlockHash::from(finalized_hash))) + 
.ok() + .flatten() + .unwrap_or_else(|| chain_spec.sealed_genesis_header()); + debug!(target: "consensus::parlia", { finalized_header_number = ?finalized_header.number, finalized_header_hash = ?finalized_header.hash() }, "Latest finalized header"); + let latest_unsafe_header = client .latest_header() .ok() .flatten() .unwrap_or_else(|| chain_spec.sealed_genesis_header()); + debug!(target: "consensus::parlia", { latest_unsafe_header_number = ?latest_unsafe_header.number, latest_unsafe_header_hash = ?latest_unsafe_header.hash() }, "Latest unsafe header"); + + let mut trusted_header = latest_unsafe_header.clone(); + // if the parent hash is not equal to the latest unsafe hash, + // a fork may have been detected, so we need to trust the finalized header + if latest_header.parent_hash != latest_unsafe_header.hash() { + trusted_header = finalized_header.clone(); + } // verify header and timestamp // predict timestamp is the trusted header timestamp plus the block interval times @@ -354,7 +362,7 @@ impl< // and finalized hash. // this can make the Block Sync Engine use pipeline sync mode. pipeline_sync, - trusted_header: trusted_header.clone(), + local_header: latest_unsafe_header.clone(), })); if result.is_err() { error!(target: "consensus::parlia", "Failed to send new block event to @@ -366,7 +374,7 @@ impl< let result = chain_tracker_tx.send(ForkChoiceMessage::NewHeader(NewHeaderEvent { header: sealed_header.clone(), pipeline_sync, - trusted_header: trusted_header.clone(), + local_header: latest_unsafe_header.clone(), })); if result.is_err() { error!(target: "consensus::parlia", "Failed to send new block event to chain tracker"); } @@ -476,7 +484,7 @@ impl< } match msg.unwrap() { ForkChoiceMessage::NewHeader(event) => { - let new_header = event.trusted_header; + let new_header = event.local_header; let snap = match snapshot_reader.snapshot(&new_header, None) { Ok(snap) => snap, diff --git a/crates/consensus/beacon/src/engine/hooks/static_file.rs b/crates/consensus/beacon/src/engine/hooks/static_file.rs index b52812b53a..54dd1ccb9a 100644 --- a/crates/consensus/beacon/src/engine/hooks/static_file.rs +++ b/crates/consensus/beacon/src/engine/hooks/static_file.rs @@ -142,6 +142,15 @@ impl EngineHook for StaticFileHook { return Poll::Pending }; + // The chain state may have been rewound if the finalized block number is greater than the tip + // block number. In this case, we should wait until the finalized block number is + // less than or equal to the tip block number, to prevent the static file producer + // from producing static files for the wrong block and incrementing the index number. + if finalized_block_number >= ctx.tip_block_number { + trace!(target: "consensus::engine::hooks::static_file", ?ctx, "Finalized block number is greater than tip number"); + return Poll::Pending + } + // Try to spawn a static_file_producer match self.try_spawn_static_file_producer(finalized_block_number)? { Some(EngineHookEvent::NotReady) => return Poll::Pending, From acee0915cde61b56c63902d99158a0e387e136fa Mon Sep 17 00:00:00 2001 From: zoro <296179868@qq.com> Date: Wed, 31 Jul 2024 15:29:27 +0800 Subject: [PATCH 7/7] docs: add change logs for v1.0.0 (#91) --- CHANGELOG.md | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f8977085ae..18a8ff49e2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,24 @@ # Changelog +## v1.0.0 +Reth is entering its production-ready v1.0.0 release.
Thanks to the Paradigm team for their continuous iterations on Reth, +providing the community with a highly scalable, modular, high-performance, and feature-rich client. +We stand on the shoulders of giants, which enabled us to swiftly launch Reth versions supporting the BSC and opBNB networks. + +### BUGFIX +* [\#75](https://github.com/bnb-chain/reth/pull/75) ci: fix release job +* [\#76](https://github.com/bnb-chain/reth/pull/76) chore: update max db size +* [\#74](https://github.com/bnb-chain/reth/pull/74) fix: add sidecars to db when doing insert_block +* [\#79](https://github.com/bnb-chain/reth/pull/79) fix: read sidecars from table in get_take_block_range +* [\#81](https://github.com/bnb-chain/reth/pull/81) fix: check parent hash of disconnected headers +* [\#83](https://github.com/bnb-chain/reth/pull/83) fix: parlia live sync issue +* [\#89](https://github.com/bnb-chain/reth/pull/89) fix: fork block handling in parlia engine and rewinding blocks to the block before the finalized block issue + +### Docs +* [\#87](https://github.com/bnb-chain/reth/pull/87) chore: refine readme file +* [\#90](https://github.com/bnb-chain/reth/pull/90) doc: fix op-reth running tutorial + + ## v1.0.0-rc.2 This release is a release candidate for the v1.0.0 release. It includes a number of new features and bug fixes.