
primitives: use alloy Header struct #10691

Merged: 16 commits, Sep 23, 2024
2 changes: 1 addition & 1 deletion Cargo.lock

13 changes: 11 additions & 2 deletions crates/chain-state/src/in_memory.rs
@@ -557,9 +557,18 @@ impl CanonicalInMemoryState {
index: index as u64,
block_hash: block_state.hash(),
block_number: block_state.block().block.number,
base_fee: block_state.block().block().header.base_fee_per_gas,
base_fee: block_state
.block()
.block()
.header
.base_fee_per_gas
.map(|base_fee| base_fee as u64),
timestamp: block_state.block().block.timestamp,
excess_blob_gas: block_state.block().block.excess_blob_gas,
excess_blob_gas: block_state
.block()
.block
.excess_blob_gas
.map(|excess_blob| excess_blob as u64),
};
return Some((tx.clone(), meta))
}
10 changes: 5 additions & 5 deletions crates/chainspec/src/spec.rs
@@ -307,19 +307,19 @@ impl ChainSpec {
};

Header {
gas_limit: self.genesis.gas_limit as u64,
gas_limit: self.genesis.gas_limit,
difficulty: self.genesis.difficulty,
nonce: self.genesis.nonce,
nonce: self.genesis.nonce.into(),
extra_data: self.genesis.extra_data.clone(),
state_root: state_root_ref_unhashed(&self.genesis.alloc),
timestamp: self.genesis.timestamp,
mix_hash: self.genesis.mix_hash,
beneficiary: self.genesis.coinbase,
base_fee_per_gas,
base_fee_per_gas: base_fee_per_gas.map(Into::into),
withdrawals_root,
parent_beacon_block_root,
blob_gas_used,
excess_blob_gas,
blob_gas_used: blob_gas_used.map(Into::into),
excess_blob_gas: excess_blob_gas.map(Into::into),
requests_root,
..Default::default()
}
32 changes: 17 additions & 15 deletions crates/consensus/common/src/validation.rs
@@ -16,8 +16,8 @@ use reth_primitives::{
pub const fn validate_header_gas(header: &Header) -> Result<(), ConsensusError> {
if header.gas_used > header.gas_limit {
return Err(ConsensusError::HeaderGasUsedExceedsGasLimit {
gas_used: header.gas_used,
gas_limit: header.gas_limit,
gas_used: header.gas_used as u64,
gas_limit: header.gas_limit as u64,
})
}
Ok(())
@@ -65,7 +65,8 @@ pub fn validate_shanghai_withdrawals(block: &SealedBlock) -> Result<(), Consensu
pub fn validate_cancun_gas(block: &SealedBlock) -> Result<(), ConsensusError> {
// Check that the blob gas used in the header matches the sum of the blob gas used by each
// blob tx
let header_blob_gas_used = block.blob_gas_used.ok_or(ConsensusError::BlobGasUsedMissing)?;
let header_blob_gas_used =
block.blob_gas_used.ok_or(ConsensusError::BlobGasUsedMissing)? as u64;
let total_blob_gas = block.blob_gas_used();
if total_blob_gas != header_blob_gas_used {
return Err(ConsensusError::BlobGasUsedDiff(GotExpected {
@@ -150,25 +151,25 @@ pub fn validate_4844_header_standalone(header: &Header) -> Result<(), ConsensusE
return Err(ConsensusError::ParentBeaconBlockRootMissing)
}

if blob_gas_used > MAX_DATA_GAS_PER_BLOCK {
if blob_gas_used as u64 > MAX_DATA_GAS_PER_BLOCK {
return Err(ConsensusError::BlobGasUsedExceedsMaxBlobGasPerBlock {
blob_gas_used,
blob_gas_used: blob_gas_used as u64,
max_blob_gas_per_block: MAX_DATA_GAS_PER_BLOCK,
})
}

if blob_gas_used % DATA_GAS_PER_BLOB != 0 {
if blob_gas_used as u64 % DATA_GAS_PER_BLOB != 0 {
return Err(ConsensusError::BlobGasUsedNotMultipleOfBlobGasPerBlob {
blob_gas_used,
blob_gas_used: blob_gas_used as u64,
blob_gas_per_blob: DATA_GAS_PER_BLOB,
})
}

// `excess_blob_gas` must also be a multiple of `DATA_GAS_PER_BLOB`. This will be checked later
// (via `calculate_excess_blob_gas`), but it doesn't hurt to catch the problem sooner.
if excess_blob_gas % DATA_GAS_PER_BLOB != 0 {
if excess_blob_gas as u64 % DATA_GAS_PER_BLOB != 0 {
return Err(ConsensusError::ExcessBlobGasNotMultipleOfBlobGasPerBlob {
excess_blob_gas,
excess_blob_gas: excess_blob_gas as u64,
blob_gas_per_blob: DATA_GAS_PER_BLOB,
})
}
@@ -224,7 +225,7 @@ pub fn validate_against_parent_eip1559_base_fee(
chain_spec: &ChainSpec,
) -> Result<(), ConsensusError> {
if chain_spec.fork(EthereumHardfork::London).active_at_block(header.number) {
let base_fee = header.base_fee_per_gas.ok_or(ConsensusError::BaseFeeMissing)?;
let base_fee = header.base_fee_per_gas.ok_or(ConsensusError::BaseFeeMissing)? as u64;

let expected_base_fee =
if chain_spec.fork(EthereumHardfork::London).transitions_at_block(header.number) {
@@ -234,7 +235,7 @@ pub fn validate_against_parent_eip1559_base_fee(
// them.
parent
.next_block_base_fee(chain_spec.base_fee_params_at_timestamp(header.timestamp))
.ok_or(ConsensusError::BaseFeeMissing)?
.ok_or(ConsensusError::BaseFeeMissing)? as u64
};
if expected_base_fee != base_fee {
return Err(ConsensusError::BaseFeeDiff(GotExpected {
@@ -253,7 +254,7 @@ pub const fn validate_against_parent_timestamp(
header: &Header,
parent: &Header,
) -> Result<(), ConsensusError> {
if header.is_timestamp_in_past(parent.timestamp) {
if header.timestamp <= parent.timestamp {
return Err(ConsensusError::TimestampIsInPast {
parent_timestamp: parent.timestamp,
timestamp: header.timestamp,
@@ -276,13 +277,14 @@ pub fn validate_against_parent_4844(
// > are evaluated as 0.
//
// This means in the first post-fork block, calculate_excess_blob_gas will return 0.
let parent_blob_gas_used = parent.blob_gas_used.unwrap_or(0);
let parent_excess_blob_gas = parent.excess_blob_gas.unwrap_or(0);
let parent_blob_gas_used = parent.blob_gas_used.unwrap_or(0) as u64;
let parent_excess_blob_gas = parent.excess_blob_gas.unwrap_or(0) as u64;

if header.blob_gas_used.is_none() {
return Err(ConsensusError::BlobGasUsedMissing)
}
let excess_blob_gas = header.excess_blob_gas.ok_or(ConsensusError::ExcessBlobGasMissing)?;
let excess_blob_gas =
header.excess_blob_gas.ok_or(ConsensusError::ExcessBlobGasMissing)? as u64;

let expected_excess_blob_gas =
calculate_excess_blob_gas(parent_excess_blob_gas, parent_blob_gas_used);
22 changes: 12 additions & 10 deletions crates/ethereum/consensus/src/lib.rs
@@ -52,33 +52,35 @@ impl EthBeaconConsensus {
// Determine the parent gas limit, considering elasticity multiplier on the London fork.
let parent_gas_limit =
if self.chain_spec.fork(EthereumHardfork::London).transitions_at_block(header.number) {
parent.gas_limit *
parent.gas_limit as u64 *
self.chain_spec
.base_fee_params_at_timestamp(header.timestamp)
.elasticity_multiplier as u64
} else {
parent.gas_limit
parent.gas_limit as u64
};

// Check for an increase in gas limit beyond the allowed threshold.
if header.gas_limit > parent_gas_limit {
if header.gas_limit - parent_gas_limit >= parent_gas_limit / 1024 {
if header.gas_limit as u64 > parent_gas_limit {
if header.gas_limit as u64 - parent_gas_limit >= parent_gas_limit / 1024 {
return Err(ConsensusError::GasLimitInvalidIncrease {
parent_gas_limit,
child_gas_limit: header.gas_limit,
child_gas_limit: header.gas_limit as u64,
})
}
}
// Check for a decrease in gas limit beyond the allowed threshold.
else if parent_gas_limit - header.gas_limit >= parent_gas_limit / 1024 {
else if parent_gas_limit - header.gas_limit as u64 >= parent_gas_limit / 1024 {
return Err(ConsensusError::GasLimitInvalidDecrease {
parent_gas_limit,
child_gas_limit: header.gas_limit,
child_gas_limit: header.gas_limit as u64,
})
}
// Check if the self gas limit is below the minimum required limit.
else if header.gas_limit < MINIMUM_GAS_LIMIT {
return Err(ConsensusError::GasLimitInvalidMinimum { child_gas_limit: header.gas_limit })
else if header.gas_limit < MINIMUM_GAS_LIMIT.into() {
return Err(ConsensusError::GasLimitInvalidMinimum {
child_gas_limit: header.gas_limit as u64,
})
}

Ok(())
@@ -161,7 +163,7 @@ impl Consensus for EthBeaconConsensus {
return Err(ConsensusError::TheMergeDifficultyIsNotZero)
}

if header.nonce != 0 {
if !header.nonce.is_zero() {
return Err(ConsensusError::TheMergeNonceIsNotZero)
}

4 changes: 2 additions & 2 deletions crates/ethereum/consensus/src/validation.rs
@@ -17,9 +17,9 @@ pub fn validate_block_post_execution(
// Check if gas used matches the value set in header.
let cumulative_gas_used =
receipts.last().map(|receipt| receipt.cumulative_gas_used).unwrap_or(0);
if block.gas_used != cumulative_gas_used {
if block.gas_used as u64 != cumulative_gas_used {
return Err(ConsensusError::BlockGasUsed {
gas: GotExpected { got: cumulative_gas_used, expected: block.gas_used },
gas: GotExpected { got: cumulative_gas_used, expected: block.gas_used as u64 },
gas_spent_by_tx: gas_spent_by_transactions(receipts),
})
}
2 changes: 1 addition & 1 deletion crates/evm/src/lib.rs
@@ -155,7 +155,7 @@ pub trait ConfigureEvmEnv: Send + Sync + Unpin + Clone + 'static {

// EIP-4844 excess blob gas of this block, introduced in Cancun
if let Some(excess_blob_gas) = header.excess_blob_gas {
block_env.set_blob_excess_gas_and_price(excess_blob_gas);
block_env.set_blob_excess_gas_and_price(excess_blob_gas as u64);
}
}

2 changes: 1 addition & 1 deletion crates/evm/src/metrics.rs
@@ -30,7 +30,7 @@ impl ExecutorMetrics {
where
F: FnOnce(BlockExecutionInput<'_, BlockWithSenders>) -> R,
{
let gas_used = input.block.gas_used;
let gas_used = input.block.gas_used as u64;

// Execute the block and record the elapsed time.
let execute_start = Instant::now();
8 changes: 6 additions & 2 deletions crates/net/downloaders/src/file_client.rs
@@ -11,7 +11,8 @@ use reth_network_p2p::{
};
use reth_network_peers::PeerId;
use reth_primitives::{
BlockBody, BlockHash, BlockHashOrNumber, BlockNumber, Header, SealedHeader, B256,
alloy_primitives::Sealable, BlockBody, BlockHash, BlockHashOrNumber, BlockNumber, Header,
SealedHeader, B256,
};
use thiserror::Error;
use tokio::{fs::File, io::AsyncReadExt};
@@ -115,7 +116,10 @@ impl FileClient {
/// Clones and returns the highest header of this client has or `None` if empty. Seals header
/// before returning.
pub fn tip_header(&self) -> Option<SealedHeader> {
self.headers.get(&self.max_block()?).map(|h| h.clone().seal_slow())
self.headers.get(&self.max_block()?).map(|h| {
let sealed = h.clone().seal_slow();
SealedHeader::new(sealed.inner().clone(), sealed.seal())
})
Collaborator: why does this have to be changed?

Contributor Author: Because in alloy, seal_slow returns a generic Sealed<T> type, which is different from the SealedHeader we have in reth. So, to stay compatible with reth for the moment, we have to reconstruct a reth SealedHeader from the alloy Sealed<Header> type.

But I'm thinking about a follow-up PR to replace our reth SealedHeader with the alloy Sealed<Header> primitive, wdyt?
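A rough sketch of that reconstruction, using only calls that already appear in this PR (Sealable::seal_slow, the Sealed accessors inner/seal/into_parts, and SealedHeader::new); the helper name is made up for illustration, and consuming the sealed value via into_parts avoids the extra clone discussed below:

use reth_primitives::{alloy_primitives::Sealable, Header, SealedHeader};

// Illustrative helper: hash an alloy Header once and repackage it as a
// reth SealedHeader without cloning the header body.
fn to_sealed_header(header: Header) -> SealedHeader {
    let sealed = header.seal_slow();          // alloy returns Sealed<Header>
    let (header, hash) = sealed.into_parts(); // split into header + precomputed hash
    SealedHeader::new(header, hash)
}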

Collaborator (@mattsse, Sep 7, 2024): This now has an additional clone, which is not great and looks just weird. We don't need the seal function here if we only need the hash.

Collaborator: This can also be solved with a SealedHeader::seal(Header) function.
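A hypothetical sketch of what such a constructor could look like inside reth-primitives (the name, signature, and placement are assumptions, not existing API at the time of this PR); with it, call sites like the one in the diff above would collapse back to a one-liner:

// Hypothetical addition to reth-primitives, under the same Sealed<Header>
// assumptions as the sketch above.
impl SealedHeader {
    /// Hash the header and seal it in one step.
    pub fn seal(header: Header) -> Self {
        let (header, hash) = header.seal_slow().into_parts();
        Self::new(header, hash)
    }
}

// A call site would then read:
// self.headers.get(&self.max_block()?).map(|h| SealedHeader::seal(h.clone()))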

Contributor Author: Maybe, to reduce overhead, what would you say to also replacing SealedHeader with the alloy Sealed type in this PR?

Collaborator: Suggested change (current code first, proposed replacement below):

self.headers.get(&self.max_block()?).map(|h| {
    let sealed = h.clone().seal_slow();
    SealedHeader::new(sealed.inner().clone(), sealed.seal())
})

self.headers.get(&self.max_block()?).map(|h| {
    let sealed = h.clone().seal_slow();
    let (header, seal) = sealed.into_parts();
    SealedHeader::new(header, seal)
})

I think we can just do this?

Contributor Author: Just fixed this, but I think that in a follow-up we can simply remove SealedHeader from reth and use Sealed instead; it will simplify everything, no?

}

/// Returns true if all blocks are canonical (no gaps)
16 changes: 13 additions & 3 deletions crates/net/downloaders/src/headers/reverse_headers.rs
@@ -17,7 +17,10 @@ use reth_network_p2p::{
priority::Priority,
};
use reth_network_peers::PeerId;
use reth_primitives::{BlockHashOrNumber, BlockNumber, GotExpected, Header, SealedHeader, B256};
use reth_primitives::{
alloy_primitives::Sealable, BlockHashOrNumber, BlockNumber, GotExpected, Header, SealedHeader,
B256,
};
use reth_tasks::{TaskSpawner, TokioTaskExecutor};
use std::{
cmp::{Ordering, Reverse},
@@ -247,7 +250,13 @@ where
) -> Result<(), ReverseHeadersDownloaderError> {
let mut validated = Vec::with_capacity(headers.len());

let sealed_headers = headers.into_par_iter().map(|h| h.seal_slow()).collect::<Vec<_>>();
let sealed_headers = headers
.into_par_iter()
.map(|h| {
let sealed = h.seal_slow();
SealedHeader::new(sealed.inner().clone(), sealed.seal())
})
.collect::<Vec<_>>();
for parent in sealed_headers {
// Validate that the header is the parent header of the last validated header.
if let Some(validated_header) =
@@ -373,7 +382,8 @@ where
.into())
}

let target = headers.remove(0).seal_slow();
let sealed_target = headers.remove(0).seal_slow();
let target = SealedHeader::new(sealed_target.inner().clone(), sealed_target.seal());

match sync_target {
SyncTargetBlock::Hash(hash) | SyncTargetBlock::HashAndNumber { hash, .. } => {
26 changes: 21 additions & 5 deletions crates/net/p2p/src/full_block.rs
@@ -8,7 +8,9 @@ use crate::{
use reth_consensus::{Consensus, ConsensusError};
use reth_eth_wire_types::HeadersDirection;
use reth_network_peers::WithPeerId;
use reth_primitives::{BlockBody, GotExpected, Header, SealedBlock, SealedHeader, B256};
use reth_primitives::{
alloy_primitives::Sealable, BlockBody, GotExpected, Header, SealedBlock, SealedHeader, B256,
};
use std::{
cmp::Reverse,
collections::{HashMap, VecDeque},
@@ -181,8 +183,14 @@ where
ResponseResult::Header(res) => {
match res {
Ok(maybe_header) => {
let (peer, maybe_header) =
maybe_header.map(|h| h.map(|h| h.seal_slow())).split();
let (peer, maybe_header) = maybe_header
.map(|h| {
h.map(|h| {
let sealed = h.seal_slow();
SealedHeader::new(sealed.inner().clone(), sealed.seal())
})
})
.split();
if let Some(header) = maybe_header {
if header.hash() == this.hash {
this.header = Some(header);
@@ -482,8 +490,16 @@ where
}

fn on_headers_response(&mut self, headers: WithPeerId<Vec<Header>>) {
let (peer, mut headers_falling) =
headers.map(|h| h.into_iter().map(|h| h.seal_slow()).collect::<Vec<_>>()).split();
let (peer, mut headers_falling) = headers
.map(|h| {
h.into_iter()
.map(|h| {
let sealed = h.seal_slow();
SealedHeader::new(sealed.inner().clone(), sealed.seal())
})
.collect::<Vec<_>>()
})
.split();

// fill in the response if it's the correct length
if headers_falling.len() == self.count as usize {