diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 1d091d6793b..1b195a1ab78 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -33,12 +33,12 @@ serde_derive = "1.0.116" slog = { version = "2.5.2", features = ["max_level_trace"] } sloggers = { version = "2.1.1", features = ["json"] } slot_clock = { path = "../../common/slot_clock" } -eth2_hashing = "0.3.0" -eth2_ssz = "0.4.1" -eth2_ssz_types = "0.2.2" -eth2_ssz_derive = "0.3.1" +ethereum_hashing = "1.0.0-beta.2" +ethereum_ssz = "0.5.0" +ssz_types = "0.5.0" +ethereum_ssz_derive = "0.5.0" state_processing = { path = "../../consensus/state_processing" } -tree_hash = "0.4.1" +tree_hash = "0.5.0" types = { path = "../../consensus/types" } tokio = "1.14.0" tokio-stream = "0.1.3" diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index c44d096890b..7d3983353e0 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -1024,7 +1024,7 @@ mod test { use super::*; use crate::test_utils::EphemeralHarnessType; use crate::validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD; - use eth2_hashing::hash; + use ethereum_hashing::hash; use genesis::{ generate_deterministic_keypairs, interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH, }; diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index f820622e57c..8b6c6b37409 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -1,7 +1,7 @@ use crate::metrics; use eth1::{Config as Eth1Config, Eth1Block, Service as HttpService}; use eth2::lighthouse::Eth1SyncStatusData; -use eth2_hashing::hash; +use ethereum_hashing::hash; use int_to_bytes::int_to_bytes32; use slog::{debug, error, trace, Logger}; use ssz::{Decode, Encode}; diff --git a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml index e0dd797bfaf..1148f063d8d 
100644 --- a/beacon_node/eth1/Cargo.toml +++ b/beacon_node/eth1/Cargo.toml @@ -20,9 +20,9 @@ serde = { version = "1.0.116", features = ["derive"] } hex = "0.4.2" types = { path = "../../consensus/types"} merkle_proof = { path = "../../consensus/merkle_proof"} -eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.1" -tree_hash = "0.4.1" +ethereum_ssz = "0.5.0" +ethereum_ssz_derive = "0.5.0" +tree_hash = "0.5.0" parking_lot = "0.12.0" slog = "2.5.2" superstruct = "0.5.0" diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 1b687a8b60e..3ed7ba65d6a 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -13,7 +13,7 @@ slog = "2.5.2" futures = "0.3.7" sensitive_url = { path = "../../common/sensitive_url" } reqwest = { version = "0.11.0", features = ["json","stream"] } -eth2_serde_utils = "0.1.1" +ethereum_serde_utils = "0.5.0" serde_json = "1.0.58" serde = { version = "1.0.116", features = ["derive"] } warp = { version = "0.3.2", features = ["tls"] } @@ -22,15 +22,15 @@ environment = { path = "../../lighthouse/environment" } bytes = "1.1.0" task_executor = { path = "../../common/task_executor" } hex = "0.4.2" -eth2_ssz = "0.4.1" -eth2_ssz_types = "0.2.2" +ethereum_ssz = "0.5.0" +ssz_types = "0.5.0" eth2 = { path = "../../common/eth2" } state_processing = { path = "../../consensus/state_processing" } superstruct = "0.6.0" lru = "0.7.1" exit-future = "0.2.0" -tree_hash = "0.4.1" -tree_hash_derive = { path = "../../consensus/tree_hash_derive"} +tree_hash = "0.5.0" +tree_hash_derive = "0.5.0" parking_lot = "0.12.0" slot_clock = { path = "../../common/slot_clock" } tempfile = "3.1.0" diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 009183d7ab9..2fbdd7a0374 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -140,11 +140,11 @@ pub enum BlockByNumberQuery<'a> { pub struct 
ExecutionBlock { #[serde(rename = "hash")] pub block_hash: ExecutionBlockHash, - #[serde(rename = "number", with = "eth2_serde_utils::u64_hex_be")] + #[serde(rename = "number", with = "serde_utils::u64_hex_be")] pub block_number: u64, pub parent_hash: ExecutionBlockHash, pub total_difficulty: Uint256, - #[serde(with = "eth2_serde_utils::u64_hex_be")] + #[serde(with = "serde_utils::u64_hex_be")] pub timestamp: u64, } @@ -170,13 +170,13 @@ pub struct ExecutionBlockWithTransactions { pub logs_bloom: FixedVector, #[serde(alias = "mixHash")] pub prev_randao: Hash256, - #[serde(rename = "number", with = "eth2_serde_utils::u64_hex_be")] + #[serde(rename = "number", with = "serde_utils::u64_hex_be")] pub block_number: u64, - #[serde(with = "eth2_serde_utils::u64_hex_be")] + #[serde(with = "serde_utils::u64_hex_be")] pub gas_limit: u64, - #[serde(with = "eth2_serde_utils::u64_hex_be")] + #[serde(with = "serde_utils::u64_hex_be")] pub gas_used: u64, - #[serde(with = "eth2_serde_utils::u64_hex_be")] + #[serde(with = "serde_utils::u64_hex_be")] pub timestamp: u64, #[serde(with = "ssz_types::serde_utils::hex_var_list")] pub extra_data: VariableList, diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 962c130c54a..ebc35f21bcd 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -1013,7 +1013,7 @@ impl HttpJsonRpc { ) -> Result>>, Error> { #[derive(Serialize)] #[serde(transparent)] - struct Quantity(#[serde(with = "eth2_serde_utils::u64_hex_be")] u64); + struct Quantity(#[serde(with = "serde_utils::u64_hex_be")] u64); let params = json!([Quantity(start), Quantity(count)]); let response: Vec>> = self diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index a1488e2dc9a..110f0f602ae 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ 
b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -432,7 +432,7 @@ pub async fn handle_rpc( ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1 => { #[derive(Deserialize)] #[serde(transparent)] - struct Quantity(#[serde(with = "eth2_serde_utils::u64_hex_be")] pub u64); + struct Quantity(#[serde(with = "serde_utils::u64_hex_be")] pub u64); let start = get_param::(params, 0) .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))? diff --git a/beacon_node/genesis/Cargo.toml b/beacon_node/genesis/Cargo.toml index 87c56d360b2..8a7d224963e 100644 --- a/beacon_node/genesis/Cargo.toml +++ b/beacon_node/genesis/Cargo.toml @@ -16,9 +16,9 @@ eth1 = { path = "../eth1"} rayon = "1.4.1" state_processing = { path = "../../consensus/state_processing" } merkle_proof = { path = "../../consensus/merkle_proof" } -eth2_ssz = "0.4.1" -eth2_hashing = "0.3.0" -tree_hash = "0.4.1" +ethereum_ssz = "0.5.0" +ethereum_hashing = "1.0.0-beta.2" +tree_hash = "0.5.0" tokio = { version = "1.14.0", features = ["full"] } slog = "2.5.2" int_to_bytes = { path = "../../consensus/int_to_bytes" } diff --git a/beacon_node/genesis/src/interop.rs b/beacon_node/genesis/src/interop.rs index 122ca8eda6b..d0129834300 100644 --- a/beacon_node/genesis/src/interop.rs +++ b/beacon_node/genesis/src/interop.rs @@ -1,5 +1,5 @@ use crate::common::genesis_deposits; -use eth2_hashing::hash; +use ethereum_hashing::hash; use rayon::prelude::*; use ssz::Encode; use state_processing::initialize_beacon_state_from_eth1; diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index e251b048565..8f253e2f245 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -24,7 +24,7 @@ lighthouse_metrics = { path = "../../common/lighthouse_metrics" } lazy_static = "1.4.0" warp_utils = { path = "../../common/warp_utils" } slot_clock = { path = "../../common/slot_clock" } -eth2_ssz = "0.4.1" +ethereum_ssz = "0.5.0" bs58 = "0.4.0" futures = "0.3.8" execution_layer = {path = "../execution_layer"} @@ -32,11 
+32,11 @@ parking_lot = "0.12.0" safe_arith = {path = "../../consensus/safe_arith"} task_executor = { path = "../../common/task_executor" } lru = "0.7.7" -tree_hash = "0.4.1" +tree_hash = "0.5.0" sysinfo = "0.26.5" system_health = { path = "../../common/system_health" } directory = { path = "../../common/directory" } -eth2_serde_utils = "0.1.1" +ethereum_serde_utils = "0.5.0" operation_pool = { path = "../operation_pool" } sensitive_url = { path = "../../common/sensitive_url" } unused_port = {path = "../../common/unused_port"} diff --git a/beacon_node/http_api/src/ui.rs b/beacon_node/http_api/src/ui.rs index 8911e785e1e..616745dbefe 100644 --- a/beacon_node/http_api/src/ui.rs +++ b/beacon_node/http_api/src/ui.rs @@ -75,15 +75,15 @@ pub fn get_validator_count( #[derive(PartialEq, Serialize, Deserialize)] pub struct ValidatorInfoRequestData { - #[serde(with = "eth2_serde_utils::quoted_u64_vec")] + #[serde(with = "serde_utils::quoted_u64_vec")] indices: Vec, } #[derive(PartialEq, Serialize, Deserialize)] pub struct ValidatorInfoValues { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] epoch: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] total_balance: u64, } diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index dda797187ba..c1b4d721742 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -8,13 +8,13 @@ edition = "2021" discv5 = { version = "0.2.2", features = ["libp2p"] } unsigned-varint = { version = "0.6.0", features = ["codec"] } types = { path = "../../consensus/types" } -eth2_ssz_types = "0.2.2" +ssz_types = "0.5.0" serde = { version = "1.0.116", features = ["derive"] } serde_derive = "1.0.116" -eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.0" -tree_hash = "0.4.1" -tree_hash_derive = "0.4.0" +ethereum_ssz = "0.5.0" +ethereum_ssz_derive = "0.5.0" +tree_hash = "0.5.0" 
+tree_hash_derive = "0.5.0" slog = { version = "2.5.2", features = ["max_level_trace"] } lighthouse_version = { path = "../../common/lighthouse_version" } tokio = { version = "1.14.0", features = ["time", "macros"] } diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index d068a20079b..fbc45364aad 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -21,8 +21,8 @@ types = { path = "../../consensus/types" } slot_clock = { path = "../../common/slot_clock" } slog = { version = "2.5.2", features = ["max_level_trace"] } hex = "0.4.2" -eth2_ssz = "0.4.1" -eth2_ssz_types = "0.2.2" +ethereum_ssz = "0.5.0" +ssz_types = "0.5.0" futures = "0.3.7" error-chain = "0.12.4" tokio = { version = "1.14.0", features = ["full"] } diff --git a/beacon_node/operation_pool/Cargo.toml b/beacon_node/operation_pool/Cargo.toml index cc4eacde898..fdbecb656f4 100644 --- a/beacon_node/operation_pool/Cargo.toml +++ b/beacon_node/operation_pool/Cargo.toml @@ -12,8 +12,8 @@ lighthouse_metrics = { path = "../../common/lighthouse_metrics" } parking_lot = "0.12.0" types = { path = "../../consensus/types" } state_processing = { path = "../../consensus/state_processing" } -eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.1" +ethereum_ssz = "0.5.0" +ethereum_ssz_derive = "0.5.0" rayon = "1.5.0" serde = "1.0.116" serde_derive = "1.0.116" diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index 7ec2af9f9db..a1c65bd26dd 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -13,8 +13,8 @@ db-key = "0.0.5" leveldb = { version = "0.8.6", default-features = false } parking_lot = "0.12.0" itertools = "0.10.0" -eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.1" +ethereum_ssz = "0.5.0" +ethereum_ssz_derive = "0.5.0" types = { path = "../../consensus/types" } state_processing = { path = "../../consensus/state_processing" } slog = "2.5.2" diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index 081ab285e3c..7eb37a9b94b 
100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -10,7 +10,7 @@ clap = "2.33.3" clap_utils = { path = "../common/clap_utils" } lighthouse_network = { path = "../beacon_node/lighthouse_network" } types = { path = "../consensus/types" } -eth2_ssz = "0.4.1" +ethereum_ssz = "0.5.0" slog = "2.5.2" tokio = "1.14.0" log = "0.4.11" diff --git a/common/clap_utils/Cargo.toml b/common/clap_utils/Cargo.toml index 62eb8aa3d5d..a882b7ce64b 100644 --- a/common/clap_utils/Cargo.toml +++ b/common/clap_utils/Cargo.toml @@ -11,7 +11,7 @@ clap = "2.33.3" hex = "0.4.2" dirs = "3.0.1" eth2_network_config = { path = "../eth2_network_config" } -eth2_ssz = "0.4.1" +ethereum_ssz = "0.5.0" ethereum-types = "0.14.1" serde = "1.0.116" serde_json = "1.0.59" diff --git a/common/deposit_contract/Cargo.toml b/common/deposit_contract/Cargo.toml index 7be0e8f3d27..aabc07fc524 100644 --- a/common/deposit_contract/Cargo.toml +++ b/common/deposit_contract/Cargo.toml @@ -14,6 +14,6 @@ hex = "0.4.2" [dependencies] types = { path = "../../consensus/types"} -eth2_ssz = "0.4.1" -tree_hash = "0.4.1" +ethereum_ssz = "0.5.0" +tree_hash = "0.5.0" ethabi = "16.0.0" diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index eca086d838f..2c5e7060b2e 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -13,15 +13,15 @@ types = { path = "../../consensus/types" } reqwest = { version = "0.11.0", features = ["json","stream"] } lighthouse_network = { path = "../../beacon_node/lighthouse_network" } proto_array = { path = "../../consensus/proto_array", optional = true } -eth2_serde_utils = "0.1.1" +ethereum_serde_utils = "0.5.0" eth2_keystore = { path = "../../crypto/eth2_keystore" } libsecp256k1 = "0.7.0" ring = "0.16.19" bytes = "1.0.1" account_utils = { path = "../../common/account_utils" } sensitive_url = { path = "../../common/sensitive_url" } -eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.1" +ethereum_ssz = "0.5.0" +ethereum_ssz_derive = "0.5.0" futures-util = "0.3.8" futures = 
"0.3.8" store = { path = "../../beacon_node/store", optional = true } diff --git a/common/eth2/src/lighthouse/attestation_rewards.rs b/common/eth2/src/lighthouse/attestation_rewards.rs index 314ffb85121..bebd1c661b3 100644 --- a/common/eth2/src/lighthouse/attestation_rewards.rs +++ b/common/eth2/src/lighthouse/attestation_rewards.rs @@ -6,32 +6,32 @@ use serde::{Deserialize, Serialize}; #[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] pub struct IdealAttestationRewards { // Validator's effective balance in gwei - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub effective_balance: u64, // Ideal attester's reward for head vote in gwei - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub head: u64, // Ideal attester's reward for target vote in gwei - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub target: u64, // Ideal attester's reward for source vote in gwei - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub source: u64, } #[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] pub struct TotalAttestationRewards { // one entry for every validator based on their attestations in the epoch - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, // attester's reward for head vote in gwei - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub head: u64, // attester's reward for target vote in gwei - #[serde(with = "eth2_serde_utils::quoted_i64")] + #[serde(with = "serde_utils::quoted_i64")] pub target: i64, // attester's reward for source vote in gwei - #[serde(with = "eth2_serde_utils::quoted_i64")] + #[serde(with = "serde_utils::quoted_i64")] pub source: i64, // TBD attester's inclusion_delay reward in gwei (phase0 only) // pub inclusion_delay: u64, diff 
--git a/common/eth2/src/lighthouse/standard_block_rewards.rs b/common/eth2/src/lighthouse/standard_block_rewards.rs index 502577500d9..15fcdc60667 100644 --- a/common/eth2/src/lighthouse/standard_block_rewards.rs +++ b/common/eth2/src/lighthouse/standard_block_rewards.rs @@ -5,22 +5,22 @@ use serde::{Deserialize, Serialize}; #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] pub struct StandardBlockReward { // proposer of the block, the proposer index who receives these rewards - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub proposer_index: u64, // total block reward in gwei, // equal to attestations + sync_aggregate + proposer_slashings + attester_slashings - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub total: u64, // block reward component due to included attestations in gwei - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub attestations: u64, // block reward component due to included sync_aggregate in gwei - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub sync_aggregate: u64, // block reward component due to included proposer_slashings in gwei - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub proposer_slashings: u64, // block reward component due to included attester_slashings in gwei - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub attester_slashings: u64, } diff --git a/common/eth2/src/lighthouse/sync_committee_rewards.rs b/common/eth2/src/lighthouse/sync_committee_rewards.rs index e215d8e3e0b..66a721dc229 100644 --- a/common/eth2/src/lighthouse/sync_committee_rewards.rs +++ b/common/eth2/src/lighthouse/sync_committee_rewards.rs @@ -5,9 +5,9 @@ use serde::{Deserialize, Serialize}; #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] pub struct 
SyncCommitteeReward { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, // sync committee reward in gwei for the validator - #[serde(with = "eth2_serde_utils::quoted_i64")] + #[serde(with = "serde_utils::quoted_i64")] pub reward: i64, } diff --git a/common/eth2/src/lighthouse_vc/http_client.rs b/common/eth2/src/lighthouse_vc/http_client.rs index 90c128751d0..e576cfcb363 100644 --- a/common/eth2/src/lighthouse_vc/http_client.rs +++ b/common/eth2/src/lighthouse_vc/http_client.rs @@ -57,7 +57,7 @@ pub fn parse_pubkey(secret: &str) -> Result, Error> { &secret[SECRET_PREFIX.len()..] }; - eth2_serde_utils::hex::decode(secret) + serde_utils::hex::decode(secret) .map_err(|e| Error::InvalidSecret(format!("invalid hex: {:?}", e))) .and_then(|bytes| { if bytes.len() != PK_LEN { @@ -174,7 +174,7 @@ impl ValidatorClientHttpClient { let message = Message::parse_slice(digest(&SHA256, &body).as_ref()).expect("sha256 is 32 bytes"); - eth2_serde_utils::hex::decode(&sig) + serde_utils::hex::decode(&sig) .ok() .and_then(|bytes| { let sig = Signature::parse_der(&bytes).ok()?; diff --git a/common/eth2/src/lighthouse_vc/std_types.rs b/common/eth2/src/lighthouse_vc/std_types.rs index 887bcb99ea6..0d67df47a9a 100644 --- a/common/eth2/src/lighthouse_vc/std_types.rs +++ b/common/eth2/src/lighthouse_vc/std_types.rs @@ -13,7 +13,7 @@ pub struct GetFeeRecipientResponse { #[derive(Debug, Deserialize, Serialize, PartialEq)] pub struct GetGasLimitResponse { pub pubkey: PublicKeyBytes, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub gas_limit: u64, } @@ -45,7 +45,7 @@ pub struct ImportKeystoresRequest { #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] #[serde(transparent)] -pub struct KeystoreJsonStr(#[serde(with = "eth2_serde_utils::json_str")] pub Keystore); +pub struct KeystoreJsonStr(#[serde(with = "serde_utils::json_str")] pub Keystore); impl std::ops::Deref for 
KeystoreJsonStr { type Target = Keystore; @@ -56,7 +56,7 @@ impl std::ops::Deref for KeystoreJsonStr { #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] #[serde(transparent)] -pub struct InterchangeJsonStr(#[serde(with = "eth2_serde_utils::json_str")] pub Interchange); +pub struct InterchangeJsonStr(#[serde(with = "serde_utils::json_str")] pub Interchange); #[derive(Debug, Deserialize, Serialize)] pub struct ImportKeystoresResponse { @@ -103,7 +103,7 @@ pub struct DeleteKeystoresRequest { #[derive(Debug, Deserialize, Serialize)] pub struct DeleteKeystoresResponse { pub data: Vec>, - #[serde(with = "eth2_serde_utils::json_str")] + #[serde(with = "serde_utils::json_str")] pub slashing_protection: Interchange, } diff --git a/common/eth2/src/lighthouse_vc/types.rs b/common/eth2/src/lighthouse_vc/types.rs index fa5d4ae119e..dd2ed03221b 100644 --- a/common/eth2/src/lighthouse_vc/types.rs +++ b/common/eth2/src/lighthouse_vc/types.rs @@ -32,14 +32,14 @@ pub struct ValidatorRequest { #[serde(default)] #[serde(skip_serializing_if = "Option::is_none")] pub builder_proposals: Option, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub deposit_gwei: u64, } #[derive(Clone, PartialEq, Serialize, Deserialize)] pub struct CreateValidatorsMnemonicRequest { pub mnemonic: ZeroizeString, - #[serde(with = "eth2_serde_utils::quoted_u32")] + #[serde(with = "serde_utils::quoted_u32")] pub key_derivation_path_offset: u32, pub validators: Vec, } @@ -62,7 +62,7 @@ pub struct CreatedValidator { #[serde(skip_serializing_if = "Option::is_none")] pub builder_proposals: Option, pub eth1_deposit_tx_data: String, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub deposit_gwei: u64, } @@ -141,7 +141,7 @@ pub struct UpdateFeeRecipientRequest { #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] pub struct UpdateGasLimitRequest { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with 
= "serde_utils::quoted_u64")] pub gas_limit: u64, } diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index d14746551c3..809200ee39b 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -82,10 +82,10 @@ impl std::fmt::Display for EndpointVersion { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct GenesisData { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub genesis_time: u64, pub genesis_validators_root: Hash256, - #[serde(with = "eth2_serde_utils::bytes_4_hex")] + #[serde(with = "serde_utils::bytes_4_hex")] pub genesis_fork_version: [u8; 4], } @@ -316,9 +316,9 @@ impl fmt::Display for ValidatorId { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct ValidatorData { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub index: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub balance: u64, pub status: ValidatorStatus, pub validator: Validator, @@ -326,9 +326,9 @@ pub struct ValidatorData { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct ValidatorBalanceData { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub index: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub balance: u64, } @@ -491,16 +491,16 @@ pub struct ValidatorsQuery { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct CommitteeData { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub index: u64, pub slot: Slot, - #[serde(with = "eth2_serde_utils::quoted_u64_vec")] + #[serde(with = "serde_utils::quoted_u64_vec")] pub validators: Vec, } #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct SyncCommitteeByValidatorIndices { - #[serde(with = "eth2_serde_utils::quoted_u64_vec")] + 
#[serde(with = "serde_utils::quoted_u64_vec")] pub validators: Vec, pub validator_aggregates: Vec, } @@ -513,7 +513,7 @@ pub struct RandaoMix { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(transparent)] pub struct SyncSubcommittee { - #[serde(with = "eth2_serde_utils::quoted_u64_vec")] + #[serde(with = "serde_utils::quoted_u64_vec")] pub indices: Vec, } @@ -538,7 +538,7 @@ pub struct BlockHeaderData { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct DepositContractData { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub chain_id: u64, pub address: Address, } @@ -562,7 +562,7 @@ pub struct IdentityData { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct MetaData { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub seq_number: u64, pub attnets: String, pub syncnets: String, @@ -649,27 +649,27 @@ pub struct ValidatorBalancesQuery { #[derive(Clone, Serialize, Deserialize)] #[serde(transparent)] -pub struct ValidatorIndexData(#[serde(with = "eth2_serde_utils::quoted_u64_vec")] pub Vec); +pub struct ValidatorIndexData(#[serde(with = "serde_utils::quoted_u64_vec")] pub Vec); /// Borrowed variant of `ValidatorIndexData`, for serializing/sending. 
#[derive(Clone, Copy, Serialize)] #[serde(transparent)] pub struct ValidatorIndexDataRef<'a>( - #[serde(serialize_with = "eth2_serde_utils::quoted_u64_vec::serialize")] pub &'a [u64], + #[serde(serialize_with = "serde_utils::quoted_u64_vec::serialize")] pub &'a [u64], ); #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct AttesterData { pub pubkey: PublicKeyBytes, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub committees_at_slot: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub committee_index: CommitteeIndex, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub committee_length: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_committee_index: u64, pub slot: Slot, } @@ -677,7 +677,7 @@ pub struct AttesterData { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct ProposerData { pub pubkey: PublicKeyBytes, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, pub slot: Slot, } @@ -726,11 +726,11 @@ pub struct ValidatorAggregateAttestationQuery { #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Hash)] pub struct BeaconCommitteeSubscription { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub committee_index: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub committees_at_slot: u64, pub slot: Slot, pub is_aggregator: bool, @@ -851,13 +851,13 @@ impl fmt::Display for PeerDirection { #[derive(Debug, 
Clone, PartialEq, Serialize, Deserialize)] pub struct PeerCount { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub connected: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub connecting: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub disconnected: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub disconnecting: u64, } @@ -892,7 +892,7 @@ pub struct SseHead { #[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] pub struct SseChainReorg { pub slot: Slot, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub depth: u64, pub old_head_block: Hash256, pub old_head_state: Hash256, @@ -925,7 +925,7 @@ pub struct SseLateHead { #[serde(untagged)] pub struct SsePayloadAttributes { #[superstruct(getter(copy))] - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub timestamp: u64, #[superstruct(getter(copy))] pub prev_randao: Hash256, @@ -938,10 +938,10 @@ pub struct SsePayloadAttributes { #[derive(PartialEq, Debug, Deserialize, Serialize, Clone)] pub struct SseExtendedPayloadAttributesGeneric { pub proposal_slot: Slot, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub proposer_index: u64, pub parent_block_root: Hash256, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub parent_block_number: u64, pub parent_block_hash: ExecutionBlockHash, pub payload_attributes: T, @@ -1205,13 +1205,13 @@ fn parse_accept(accept: &str) -> Result, String> { #[derive(Debug, Serialize, Deserialize)] pub struct LivenessRequestData { pub epoch: Epoch, - #[serde(with = "eth2_serde_utils::quoted_u64_vec")] + #[serde(with = "serde_utils::quoted_u64_vec")] pub indices: Vec, } #[derive(PartialEq, Debug, 
Serialize, Deserialize)] pub struct LivenessResponseData { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub index: u64, pub epoch: Epoch, pub is_live: bool, @@ -1262,7 +1262,7 @@ pub struct ForkChoiceNode { pub parent_root: Option, pub justified_epoch: Option, pub finalized_epoch: Option, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub weight: u64, pub validity: Option, pub execution_block_hash: Option, diff --git a/common/eth2_interop_keypairs/Cargo.toml b/common/eth2_interop_keypairs/Cargo.toml index 5f577bedc3c..7a376568eb8 100644 --- a/common/eth2_interop_keypairs/Cargo.toml +++ b/common/eth2_interop_keypairs/Cargo.toml @@ -9,7 +9,7 @@ edition = "2021" [dependencies] lazy_static = "1.4.0" num-bigint = "0.4.2" -eth2_hashing = "0.3.0" +ethereum_hashing = "1.0.0-beta.2" hex = "0.4.2" serde_yaml = "0.8.13" serde = "1.0.116" diff --git a/common/eth2_interop_keypairs/src/lib.rs b/common/eth2_interop_keypairs/src/lib.rs index eb26f563e0d..7b5fa7a8e4f 100644 --- a/common/eth2_interop_keypairs/src/lib.rs +++ b/common/eth2_interop_keypairs/src/lib.rs @@ -20,7 +20,7 @@ extern crate lazy_static; use bls::{Keypair, PublicKey, SecretKey}; -use eth2_hashing::hash; +use ethereum_hashing::hash; use num_bigint::BigUint; use serde_derive::{Deserialize, Serialize}; use std::convert::TryInto; diff --git a/common/validator_dir/Cargo.toml b/common/validator_dir/Cargo.toml index 0eba4cf2327..39a14e28377 100644 --- a/common/validator_dir/Cargo.toml +++ b/common/validator_dir/Cargo.toml @@ -16,7 +16,7 @@ filesystem = { path = "../filesystem" } types = { path = "../../consensus/types" } rand = "0.8.5" deposit_contract = { path = "../deposit_contract" } -tree_hash = "0.4.1" +tree_hash = "0.5.0" hex = "0.4.2" derivative = "2.1.1" lockfile = { path = "../lockfile" } diff --git a/consensus/cached_tree_hash/Cargo.toml b/consensus/cached_tree_hash/Cargo.toml index 0e0ef0707e2..c2856003bfd 
100644 --- a/consensus/cached_tree_hash/Cargo.toml +++ b/consensus/cached_tree_hash/Cargo.toml @@ -6,11 +6,11 @@ edition = "2021" [dependencies] ethereum-types = "0.14.1" -eth2_ssz_types = "0.2.2" -eth2_hashing = "0.3.0" -eth2_ssz_derive = "0.3.1" -eth2_ssz = "0.4.1" -tree_hash = "0.4.1" +ssz_types = "0.5.0" +ethereum_hashing = "1.0.0-beta.2" +ethereum_ssz_derive = "0.5.0" +ethereum_ssz = "0.5.0" +tree_hash = "0.5.0" smallvec = "1.6.1" [dev-dependencies] diff --git a/consensus/cached_tree_hash/src/cache.rs b/consensus/cached_tree_hash/src/cache.rs index edb60f30600..3b4878503ea 100644 --- a/consensus/cached_tree_hash/src/cache.rs +++ b/consensus/cached_tree_hash/src/cache.rs @@ -1,7 +1,7 @@ use crate::cache_arena; use crate::SmallVec8; use crate::{Error, Hash256}; -use eth2_hashing::{hash32_concat, ZERO_HASHES}; +use ethereum_hashing::{hash32_concat, ZERO_HASHES}; use smallvec::smallvec; use ssz_derive::{Decode, Encode}; use tree_hash::BYTES_PER_CHUNK; diff --git a/consensus/cached_tree_hash/src/test.rs b/consensus/cached_tree_hash/src/test.rs index 244439ab30a..69b49826bf8 100644 --- a/consensus/cached_tree_hash/src/test.rs +++ b/consensus/cached_tree_hash/src/test.rs @@ -1,6 +1,6 @@ use crate::impls::hash256_iter; use crate::{CacheArena, CachedTreeHash, Error, Hash256, TreeHashCache}; -use eth2_hashing::ZERO_HASHES; +use ethereum_hashing::ZERO_HASHES; use quickcheck_macros::quickcheck; use ssz_types::{ typenum::{Unsigned, U16, U255, U256, U257}, diff --git a/consensus/fork_choice/Cargo.toml b/consensus/fork_choice/Cargo.toml index f0381e5ad99..3864d52d47c 100644 --- a/consensus/fork_choice/Cargo.toml +++ b/consensus/fork_choice/Cargo.toml @@ -10,8 +10,8 @@ edition = "2021" types = { path = "../types" } state_processing = { path = "../state_processing" } proto_array = { path = "../proto_array" } -eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.1" +ethereum_ssz = "0.5.0" +ethereum_ssz_derive = "0.5.0" slog = { version = "2.5.2", features = ["max_level_trace", 
"release_max_level_trace"] } [dev-dependencies] diff --git a/consensus/merkle_proof/Cargo.toml b/consensus/merkle_proof/Cargo.toml index 2c0dbf1a758..2b883f8646e 100644 --- a/consensus/merkle_proof/Cargo.toml +++ b/consensus/merkle_proof/Cargo.toml @@ -6,7 +6,7 @@ edition = "2021" [dependencies] ethereum-types = "0.14.1" -eth2_hashing = "0.3.0" +ethereum_hashing = "1.0.0-beta.2" lazy_static = "1.4.0" safe_arith = { path = "../safe_arith" } diff --git a/consensus/merkle_proof/src/lib.rs b/consensus/merkle_proof/src/lib.rs index 887deb1efd6..dc3de71cefd 100644 --- a/consensus/merkle_proof/src/lib.rs +++ b/consensus/merkle_proof/src/lib.rs @@ -1,4 +1,4 @@ -use eth2_hashing::{hash, hash32_concat, ZERO_HASHES}; +use ethereum_hashing::{hash, hash32_concat, ZERO_HASHES}; use ethereum_types::H256; use lazy_static::lazy_static; use safe_arith::ArithError; diff --git a/consensus/proto_array/Cargo.toml b/consensus/proto_array/Cargo.toml index 205ef8f5210..cd43c566f00 100644 --- a/consensus/proto_array/Cargo.toml +++ b/consensus/proto_array/Cargo.toml @@ -10,8 +10,8 @@ path = "src/bin.rs" [dependencies] types = { path = "../types" } -eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.1" +ethereum_ssz = "0.5.0" +ethereum_ssz_derive = "0.5.0" serde = "1.0.116" serde_derive = "1.0.116" serde_yaml = "0.8.13" diff --git a/consensus/serde_utils/Cargo.toml b/consensus/serde_utils/Cargo.toml deleted file mode 100644 index d4ba02765fb..00000000000 --- a/consensus/serde_utils/Cargo.toml +++ /dev/null @@ -1,14 +0,0 @@ -[package] -name = "eth2_serde_utils" -version = "0.1.1" -authors = ["Paul Hauner "] -edition = "2021" -description = "Serialization and deserialization utilities useful for JSON representations of Ethereum 2.0 types." 
-license = "Apache-2.0" - -[dependencies] -serde = { version = "1.0.116", features = ["derive"] } -serde_derive = "1.0.116" -serde_json = "1.0.58" -hex = "0.4.2" -ethereum-types = "0.14.1" diff --git a/consensus/serde_utils/src/fixed_bytes_hex.rs b/consensus/serde_utils/src/fixed_bytes_hex.rs deleted file mode 100644 index 4e9dc98aca8..00000000000 --- a/consensus/serde_utils/src/fixed_bytes_hex.rs +++ /dev/null @@ -1,52 +0,0 @@ -//! Formats `[u8; n]` as a 0x-prefixed hex string. -//! -//! E.g., `[0, 1, 2, 3]` serializes as `"0x00010203"`. - -use crate::hex::PrefixedHexVisitor; -use serde::de::Error; -use serde::{Deserializer, Serializer}; - -macro_rules! bytes_hex { - ($num_bytes: tt) => { - use super::*; - - const BYTES_LEN: usize = $num_bytes; - - pub fn serialize(bytes: &[u8; BYTES_LEN], serializer: S) -> Result - where - S: Serializer, - { - let mut hex_string: String = "0x".to_string(); - hex_string.push_str(&hex::encode(&bytes)); - - serializer.serialize_str(&hex_string) - } - - pub fn deserialize<'de, D>(deserializer: D) -> Result<[u8; BYTES_LEN], D::Error> - where - D: Deserializer<'de>, - { - let decoded = deserializer.deserialize_str(PrefixedHexVisitor)?; - - if decoded.len() != BYTES_LEN { - return Err(D::Error::custom(format!( - "expected {} bytes for array, got {}", - BYTES_LEN, - decoded.len() - ))); - } - - let mut array = [0; BYTES_LEN]; - array.copy_from_slice(&decoded); - Ok(array) - } - }; -} - -pub mod bytes_4_hex { - bytes_hex!(4); -} - -pub mod bytes_8_hex { - bytes_hex!(8); -} diff --git a/consensus/serde_utils/src/hex.rs b/consensus/serde_utils/src/hex.rs deleted file mode 100644 index 9a2cd65c764..00000000000 --- a/consensus/serde_utils/src/hex.rs +++ /dev/null @@ -1,77 +0,0 @@ -//! Provides utilities for parsing 0x-prefixed hex strings. - -use serde::de::{self, Visitor}; -use std::fmt; - -/// Encode `data` as a 0x-prefixed hex string. 
-pub fn encode>(data: T) -> String { - let hex = hex::encode(data); - - let mut s = "0x".to_string(); - s.push_str(hex.as_str()); - s -} - -/// Decode `data` from a 0x-prefixed hex string. -pub fn decode(s: &str) -> Result, String> { - if let Some(stripped) = s.strip_prefix("0x") { - hex::decode(stripped).map_err(|e| format!("invalid hex: {:?}", e)) - } else { - Err("hex must have 0x prefix".to_string()) - } -} - -pub struct PrefixedHexVisitor; - -impl<'de> Visitor<'de> for PrefixedHexVisitor { - type Value = Vec; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("a hex string with 0x prefix") - } - - fn visit_str(self, value: &str) -> Result - where - E: de::Error, - { - decode(value).map_err(de::Error::custom) - } -} - -pub struct HexVisitor; - -impl<'de> Visitor<'de> for HexVisitor { - type Value = Vec; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("a hex string (irrelevant of prefix)") - } - - fn visit_str(self, value: &str) -> Result - where - E: de::Error, - { - hex::decode(value.trim_start_matches("0x")) - .map_err(|e| de::Error::custom(format!("invalid hex ({:?})", e))) - } -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn encoding() { - let bytes = vec![0, 255]; - let hex = encode(bytes); - assert_eq!(hex.as_str(), "0x00ff"); - - let bytes = vec![]; - let hex = encode(bytes); - assert_eq!(hex.as_str(), "0x"); - - let bytes = vec![1, 2, 3]; - let hex = encode(bytes); - assert_eq!(hex.as_str(), "0x010203"); - } -} diff --git a/consensus/serde_utils/src/hex_vec.rs b/consensus/serde_utils/src/hex_vec.rs deleted file mode 100644 index f7f4833628c..00000000000 --- a/consensus/serde_utils/src/hex_vec.rs +++ /dev/null @@ -1,23 +0,0 @@ -//! Formats `Vec` as a 0x-prefixed hex string. -//! -//! E.g., `vec![0, 1, 2, 3]` serializes as `"0x00010203"`. 
- -use crate::hex::PrefixedHexVisitor; -use serde::{Deserializer, Serializer}; - -pub fn serialize(bytes: &[u8], serializer: S) -> Result -where - S: Serializer, -{ - let mut hex_string: String = "0x".to_string(); - hex_string.push_str(&hex::encode(bytes)); - - serializer.serialize_str(&hex_string) -} - -pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> -where - D: Deserializer<'de>, -{ - deserializer.deserialize_str(PrefixedHexVisitor) -} diff --git a/consensus/serde_utils/src/json_str.rs b/consensus/serde_utils/src/json_str.rs deleted file mode 100644 index b9a1813915a..00000000000 --- a/consensus/serde_utils/src/json_str.rs +++ /dev/null @@ -1,25 +0,0 @@ -//! Serialize a datatype as a JSON-blob within a single string. -use serde::{ - de::{DeserializeOwned, Error as _}, - ser::Error as _, - Deserialize, Deserializer, Serialize, Serializer, -}; - -/// Serialize as a JSON object within a string. -pub fn serialize(value: &T, serializer: S) -> Result -where - S: Serializer, - T: Serialize, -{ - serializer.serialize_str(&serde_json::to_string(value).map_err(S::Error::custom)?) -} - -/// Deserialize a JSON object embedded in a string. 
-pub fn deserialize<'de, T, D>(deserializer: D) -> Result -where - D: Deserializer<'de>, - T: DeserializeOwned, -{ - let json_str = String::deserialize(deserializer)?; - serde_json::from_str(&json_str).map_err(D::Error::custom) -} diff --git a/consensus/serde_utils/src/lib.rs b/consensus/serde_utils/src/lib.rs deleted file mode 100644 index 5c5dafc6656..00000000000 --- a/consensus/serde_utils/src/lib.rs +++ /dev/null @@ -1,15 +0,0 @@ -mod quoted_int; - -pub mod fixed_bytes_hex; -pub mod hex; -pub mod hex_vec; -pub mod json_str; -pub mod list_of_bytes_lists; -pub mod quoted_u64_vec; -pub mod u256_hex_be; -pub mod u32_hex; -pub mod u64_hex_be; -pub mod u8_hex; - -pub use fixed_bytes_hex::{bytes_4_hex, bytes_8_hex}; -pub use quoted_int::{quoted_i64, quoted_u256, quoted_u32, quoted_u64, quoted_u8}; diff --git a/consensus/serde_utils/src/list_of_bytes_lists.rs b/consensus/serde_utils/src/list_of_bytes_lists.rs deleted file mode 100644 index b93321aa06b..00000000000 --- a/consensus/serde_utils/src/list_of_bytes_lists.rs +++ /dev/null @@ -1,49 +0,0 @@ -//! Formats `Vec` using quotes. -//! -//! E.g., `vec![0, 1, 2]` serializes as `["0", "1", "2"]`. -//! -//! Quotes can be optional during decoding. - -use crate::hex; -use serde::ser::SerializeSeq; -use serde::{de, Deserializer, Serializer}; - -pub struct ListOfBytesListVisitor; -impl<'a> serde::de::Visitor<'a> for ListOfBytesListVisitor { - type Value = Vec>; - - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(formatter, "a list of 0x-prefixed byte lists") - } - - fn visit_seq(self, mut seq: A) -> Result - where - A: serde::de::SeqAccess<'a>, - { - let mut vec = vec![]; - - while let Some(val) = seq.next_element::()? 
{ - vec.push(hex::decode(&val).map_err(de::Error::custom)?); - } - - Ok(vec) - } -} - -pub fn serialize(value: &[Vec], serializer: S) -> Result -where - S: Serializer, -{ - let mut seq = serializer.serialize_seq(Some(value.len()))?; - for val in value { - seq.serialize_element(&hex::encode(val))?; - } - seq.end() -} - -pub fn deserialize<'de, D>(deserializer: D) -> Result>, D::Error> -where - D: Deserializer<'de>, -{ - deserializer.deserialize_any(ListOfBytesListVisitor) -} diff --git a/consensus/serde_utils/src/quoted_int.rs b/consensus/serde_utils/src/quoted_int.rs deleted file mode 100644 index 0cc35aa318c..00000000000 --- a/consensus/serde_utils/src/quoted_int.rs +++ /dev/null @@ -1,247 +0,0 @@ -//! Formats some integer types using quotes. -//! -//! E.g., `1` serializes as `"1"`. -//! -//! Quotes can be optional during decoding. - -use ethereum_types::U256; -use serde::{Deserializer, Serializer}; -use serde_derive::{Deserialize, Serialize}; -use std::convert::TryFrom; -use std::marker::PhantomData; - -macro_rules! define_mod { - ($int: ty) => { - /// Serde support for deserializing quoted integers. - /// - /// Configurable so that quotes are either required or optional. 
- pub struct QuotedIntVisitor { - require_quotes: bool, - _phantom: PhantomData, - } - - impl<'a, T> serde::de::Visitor<'a> for QuotedIntVisitor - where - T: From<$int> + Into<$int> + Copy + TryFrom, - { - type Value = T; - - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - if self.require_quotes { - write!(formatter, "a quoted integer") - } else { - write!(formatter, "a quoted or unquoted integer") - } - } - - fn visit_str(self, s: &str) -> Result - where - E: serde::de::Error, - { - s.parse::<$int>() - .map(T::from) - .map_err(serde::de::Error::custom) - } - - fn visit_u64(self, v: u64) -> Result - where - E: serde::de::Error, - { - if self.require_quotes { - Err(serde::de::Error::custom( - "received unquoted integer when quotes are required", - )) - } else { - T::try_from(v).map_err(|_| serde::de::Error::custom("invalid integer")) - } - } - } - - /// Compositional wrapper type that allows quotes or no quotes. - #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Deserialize, Serialize)] - #[serde(transparent)] - pub struct MaybeQuoted - where - T: From<$int> + Into<$int> + Copy + TryFrom, - { - #[serde(with = "self")] - pub value: T, - } - - /// Wrapper type for requiring quotes on a `$int`-like type. - /// - /// Unlike using `serde(with = "quoted_$int::require_quotes")` this is composable, and can be nested - /// inside types like `Option`, `Result` and `Vec`. - #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Deserialize, Serialize)] - #[serde(transparent)] - pub struct Quoted - where - T: From<$int> + Into<$int> + Copy + TryFrom, - { - #[serde(with = "require_quotes")] - pub value: T, - } - - /// Serialize with quotes. - pub fn serialize(value: &T, serializer: S) -> Result - where - S: Serializer, - T: From<$int> + Into<$int> + Copy, - { - let v: $int = (*value).into(); - serializer.serialize_str(&format!("{}", v)) - } - - /// Deserialize with or without quotes. 
- pub fn deserialize<'de, D, T>(deserializer: D) -> Result - where - D: Deserializer<'de>, - T: From<$int> + Into<$int> + Copy + TryFrom, - { - deserializer.deserialize_any(QuotedIntVisitor { - require_quotes: false, - _phantom: PhantomData, - }) - } - - /// Requires quotes when deserializing. - /// - /// Usage: `#[serde(with = "quoted_u64::require_quotes")]`. - pub mod require_quotes { - pub use super::serialize; - use super::*; - - pub fn deserialize<'de, D, T>(deserializer: D) -> Result - where - D: Deserializer<'de>, - T: From<$int> + Into<$int> + Copy + TryFrom, - { - deserializer.deserialize_any(QuotedIntVisitor { - require_quotes: true, - _phantom: PhantomData, - }) - } - } - - #[cfg(test)] - mod test { - use super::*; - - #[test] - fn require_quotes() { - let x = serde_json::from_str::>("\"8\"").unwrap(); - assert_eq!(x.value, 8); - serde_json::from_str::>("8").unwrap_err(); - } - } - }; -} - -pub mod quoted_u8 { - use super::*; - - define_mod!(u8); -} - -pub mod quoted_u32 { - use super::*; - - define_mod!(u32); -} - -pub mod quoted_u64 { - use super::*; - - define_mod!(u64); -} - -pub mod quoted_i64 { - use super::*; - - define_mod!(i64); -} - -pub mod quoted_u256 { - use super::*; - - struct U256Visitor; - - impl<'de> serde::de::Visitor<'de> for U256Visitor { - type Value = U256; - - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - formatter.write_str("a quoted U256 integer") - } - - fn visit_str(self, v: &str) -> Result - where - E: serde::de::Error, - { - U256::from_dec_str(v).map_err(serde::de::Error::custom) - } - } - - /// Serialize with quotes. - pub fn serialize(value: &U256, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&format!("{}", value)) - } - - /// Deserialize with quotes. 
- pub fn deserialize<'de, D>(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - deserializer.deserialize_str(U256Visitor) - } -} - -#[cfg(test)] -mod test { - use super::*; - - #[derive(Debug, PartialEq, Serialize, Deserialize)] - #[serde(transparent)] - struct WrappedU256(#[serde(with = "quoted_u256")] U256); - - #[test] - fn u256_with_quotes() { - assert_eq!( - &serde_json::to_string(&WrappedU256(U256::one())).unwrap(), - "\"1\"" - ); - assert_eq!( - serde_json::from_str::("\"1\"").unwrap(), - WrappedU256(U256::one()) - ); - } - - #[test] - fn u256_without_quotes() { - serde_json::from_str::("1").unwrap_err(); - } - - #[derive(Debug, PartialEq, Serialize, Deserialize)] - #[serde(transparent)] - struct WrappedI64(#[serde(with = "quoted_i64")] i64); - - #[test] - fn negative_i64_with_quotes() { - assert_eq!( - serde_json::from_str::("\"-200\"").unwrap().0, - -200 - ); - assert_eq!( - serde_json::to_string(&WrappedI64(-12_500)).unwrap(), - "\"-12500\"" - ); - } - - // It would be OK if this worked, but we don't need it to (i64s should always be quoted). - #[test] - fn negative_i64_without_quotes() { - serde_json::from_str::("-200").unwrap_err(); - } -} diff --git a/consensus/serde_utils/src/quoted_u64_vec.rs b/consensus/serde_utils/src/quoted_u64_vec.rs deleted file mode 100644 index f124c989092..00000000000 --- a/consensus/serde_utils/src/quoted_u64_vec.rs +++ /dev/null @@ -1,97 +0,0 @@ -//! Formats `Vec` using quotes. -//! -//! E.g., `vec![0, 1, 2]` serializes as `["0", "1", "2"]`. -//! -//! Quotes can be optional during decoding. 
- -use serde::ser::SerializeSeq; -use serde::{Deserializer, Serializer}; -use serde_derive::{Deserialize, Serialize}; - -#[derive(Serialize, Deserialize)] -#[serde(transparent)] -pub struct QuotedIntWrapper { - #[serde(with = "crate::quoted_u64")] - pub int: u64, -} - -pub struct QuotedIntVecVisitor; -impl<'a> serde::de::Visitor<'a> for QuotedIntVecVisitor { - type Value = Vec; - - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(formatter, "a list of quoted or unquoted integers") - } - - fn visit_seq(self, mut seq: A) -> Result - where - A: serde::de::SeqAccess<'a>, - { - let mut vec = vec![]; - - while let Some(val) = seq.next_element()? { - let val: QuotedIntWrapper = val; - vec.push(val.int); - } - - Ok(vec) - } -} - -pub fn serialize(value: &[u64], serializer: S) -> Result -where - S: Serializer, -{ - let mut seq = serializer.serialize_seq(Some(value.len()))?; - for &int in value { - seq.serialize_element(&QuotedIntWrapper { int })?; - } - seq.end() -} - -pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> -where - D: Deserializer<'de>, -{ - deserializer.deserialize_any(QuotedIntVecVisitor) -} - -#[cfg(test)] -mod test { - use super::*; - - #[derive(Debug, Serialize, Deserialize)] - struct Obj { - #[serde(with = "crate::quoted_u64_vec")] - values: Vec, - } - - #[test] - fn quoted_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": ["1", "2", "3", "4"] }"#).unwrap(); - assert_eq!(obj.values, vec![1, 2, 3, 4]); - } - - #[test] - fn unquoted_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": [1, 2, 3, 4] }"#).unwrap(); - assert_eq!(obj.values, vec![1, 2, 3, 4]); - } - - #[test] - fn mixed_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": ["1", 2, "3", "4"] }"#).unwrap(); - assert_eq!(obj.values, vec![1, 2, 3, 4]); - } - - #[test] - fn empty_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": [] }"#).unwrap(); - 
assert!(obj.values.is_empty()); - } - - #[test] - fn whole_list_quoted_err() { - serde_json::from_str::(r#"{ "values": "[1, 2, 3, 4]" }"#).unwrap_err(); - } -} diff --git a/consensus/serde_utils/src/u256_hex_be.rs b/consensus/serde_utils/src/u256_hex_be.rs deleted file mode 100644 index 8007e5792c3..00000000000 --- a/consensus/serde_utils/src/u256_hex_be.rs +++ /dev/null @@ -1,144 +0,0 @@ -use ethereum_types::U256; - -use serde::de::Visitor; -use serde::{de, Deserializer, Serialize, Serializer}; -use std::fmt; -use std::str::FromStr; - -pub fn serialize(num: &U256, serializer: S) -> Result -where - S: Serializer, -{ - num.serialize(serializer) -} - -pub struct U256Visitor; - -impl<'de> Visitor<'de> for U256Visitor { - type Value = String; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("a well formatted hex string") - } - - fn visit_str(self, value: &str) -> Result - where - E: de::Error, - { - if !value.starts_with("0x") { - return Err(de::Error::custom("must start with 0x")); - } - let stripped = &value[2..]; - if stripped.is_empty() { - Err(de::Error::custom(format!( - "quantity cannot be {:?}", - stripped - ))) - } else if stripped == "0" { - Ok(value.to_string()) - } else if stripped.starts_with('0') { - Err(de::Error::custom("cannot have leading zero")) - } else { - Ok(value.to_string()) - } - } -} - -pub fn deserialize<'de, D>(deserializer: D) -> Result -where - D: Deserializer<'de>, -{ - let decoded = deserializer.deserialize_string(U256Visitor)?; - - U256::from_str(&decoded).map_err(|e| de::Error::custom(format!("Invalid U256 string: {}", e))) -} - -#[cfg(test)] -mod test { - use ethereum_types::U256; - use serde::{Deserialize, Serialize}; - use serde_json; - - #[derive(Debug, PartialEq, Serialize, Deserialize)] - #[serde(transparent)] - struct Wrapper { - #[serde(with = "super")] - val: U256, - } - - #[test] - fn encoding() { - assert_eq!( - &serde_json::to_string(&Wrapper { val: 0.into() }).unwrap(), - 
"\"0x0\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { val: 1.into() }).unwrap(), - "\"0x1\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { val: 256.into() }).unwrap(), - "\"0x100\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { val: 65.into() }).unwrap(), - "\"0x41\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { val: 1024.into() }).unwrap(), - "\"0x400\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { - val: U256::max_value() - 1 - }) - .unwrap(), - "\"0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { - val: U256::max_value() - }) - .unwrap(), - "\"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"" - ); - } - - #[test] - fn decoding() { - assert_eq!( - serde_json::from_str::("\"0x0\"").unwrap(), - Wrapper { val: 0.into() }, - ); - assert_eq!( - serde_json::from_str::("\"0x41\"").unwrap(), - Wrapper { val: 65.into() }, - ); - assert_eq!( - serde_json::from_str::("\"0x400\"").unwrap(), - Wrapper { val: 1024.into() }, - ); - assert_eq!( - serde_json::from_str::( - "\"0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe\"" - ) - .unwrap(), - Wrapper { - val: U256::max_value() - 1 - }, - ); - assert_eq!( - serde_json::from_str::( - "\"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"" - ) - .unwrap(), - Wrapper { - val: U256::max_value() - }, - ); - serde_json::from_str::("\"0x\"").unwrap_err(); - serde_json::from_str::("\"0x0400\"").unwrap_err(); - serde_json::from_str::("\"400\"").unwrap_err(); - serde_json::from_str::("\"ff\"").unwrap_err(); - } -} diff --git a/consensus/serde_utils/src/u32_hex.rs b/consensus/serde_utils/src/u32_hex.rs deleted file mode 100644 index c1ab3537b2a..00000000000 --- a/consensus/serde_utils/src/u32_hex.rs +++ /dev/null @@ -1,21 +0,0 @@ -//! Formats `u32` as a 0x-prefixed, little-endian hex string. -//! -//! E.g., `0` serializes as `"0x00000000"`. 
- -use crate::bytes_4_hex; -use serde::{Deserializer, Serializer}; - -pub fn serialize(num: &u32, serializer: S) -> Result -where - S: Serializer, -{ - let hex = format!("0x{}", hex::encode(num.to_le_bytes())); - serializer.serialize_str(&hex) -} - -pub fn deserialize<'de, D>(deserializer: D) -> Result -where - D: Deserializer<'de>, -{ - bytes_4_hex::deserialize(deserializer).map(u32::from_le_bytes) -} diff --git a/consensus/serde_utils/src/u64_hex_be.rs b/consensus/serde_utils/src/u64_hex_be.rs deleted file mode 100644 index e3364a2d2c9..00000000000 --- a/consensus/serde_utils/src/u64_hex_be.rs +++ /dev/null @@ -1,134 +0,0 @@ -//! Formats `u64` as a 0x-prefixed, big-endian hex string. -//! -//! E.g., `0` serializes as `"0x0000000000000000"`. - -use serde::de::{self, Error, Visitor}; -use serde::{Deserializer, Serializer}; -use std::fmt; - -const BYTES_LEN: usize = 8; - -pub struct QuantityVisitor; -impl<'de> Visitor<'de> for QuantityVisitor { - type Value = Vec; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("a hex string") - } - - fn visit_str(self, value: &str) -> Result - where - E: de::Error, - { - if !value.starts_with("0x") { - return Err(de::Error::custom("must start with 0x")); - } - - let stripped = value.trim_start_matches("0x"); - - if stripped.is_empty() { - Err(de::Error::custom(format!( - "quantity cannot be {}", - stripped - ))) - } else if stripped == "0" { - Ok(vec![0]) - } else if stripped.starts_with('0') { - Err(de::Error::custom("cannot have leading zero")) - } else if stripped.len() % 2 != 0 { - hex::decode(format!("0{}", stripped)) - .map_err(|e| de::Error::custom(format!("invalid hex ({:?})", e))) - } else { - hex::decode(stripped).map_err(|e| de::Error::custom(format!("invalid hex ({:?})", e))) - } - } -} - -pub fn serialize(num: &u64, serializer: S) -> Result -where - S: Serializer, -{ - let raw = hex::encode(num.to_be_bytes()); - let trimmed = raw.trim_start_matches('0'); - - let hex = if 
trimmed.is_empty() { "0" } else { trimmed }; - - serializer.serialize_str(&format!("0x{}", &hex)) -} - -pub fn deserialize<'de, D>(deserializer: D) -> Result -where - D: Deserializer<'de>, -{ - let decoded = deserializer.deserialize_str(QuantityVisitor)?; - - // TODO: this is not strict about byte length like other methods. - if decoded.len() > BYTES_LEN { - return Err(D::Error::custom(format!( - "expected max {} bytes for array, got {}", - BYTES_LEN, - decoded.len() - ))); - } - - let mut array = [0; BYTES_LEN]; - array[BYTES_LEN - decoded.len()..].copy_from_slice(&decoded); - Ok(u64::from_be_bytes(array)) -} - -#[cfg(test)] -mod test { - use serde::{Deserialize, Serialize}; - use serde_json; - - #[derive(Debug, PartialEq, Serialize, Deserialize)] - #[serde(transparent)] - struct Wrapper { - #[serde(with = "super")] - val: u64, - } - - #[test] - fn encoding() { - assert_eq!( - &serde_json::to_string(&Wrapper { val: 0 }).unwrap(), - "\"0x0\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { val: 1 }).unwrap(), - "\"0x1\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { val: 256 }).unwrap(), - "\"0x100\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { val: 65 }).unwrap(), - "\"0x41\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { val: 1024 }).unwrap(), - "\"0x400\"" - ); - } - - #[test] - fn decoding() { - assert_eq!( - serde_json::from_str::("\"0x0\"").unwrap(), - Wrapper { val: 0 }, - ); - assert_eq!( - serde_json::from_str::("\"0x41\"").unwrap(), - Wrapper { val: 65 }, - ); - assert_eq!( - serde_json::from_str::("\"0x400\"").unwrap(), - Wrapper { val: 1024 }, - ); - serde_json::from_str::("\"0x\"").unwrap_err(); - serde_json::from_str::("\"0x0400\"").unwrap_err(); - serde_json::from_str::("\"400\"").unwrap_err(); - serde_json::from_str::("\"ff\"").unwrap_err(); - } -} diff --git a/consensus/serde_utils/src/u8_hex.rs b/consensus/serde_utils/src/u8_hex.rs deleted file mode 100644 index 8083e1d120b..00000000000 --- 
a/consensus/serde_utils/src/u8_hex.rs +++ /dev/null @@ -1,29 +0,0 @@ -//! Formats `u8` as a 0x-prefixed hex string. -//! -//! E.g., `0` serializes as `"0x00"`. - -use crate::hex::PrefixedHexVisitor; -use serde::de::Error; -use serde::{Deserializer, Serializer}; - -pub fn serialize(byte: &u8, serializer: S) -> Result -where - S: Serializer, -{ - let hex = format!("0x{}", hex::encode([*byte])); - serializer.serialize_str(&hex) -} - -pub fn deserialize<'de, D>(deserializer: D) -> Result -where - D: Deserializer<'de>, -{ - let bytes = deserializer.deserialize_str(PrefixedHexVisitor)?; - if bytes.len() != 1 { - return Err(D::Error::custom(format!( - "expected 1 byte for u8, got {}", - bytes.len() - ))); - } - Ok(bytes[0]) -} diff --git a/consensus/ssz/Cargo.toml b/consensus/ssz/Cargo.toml deleted file mode 100644 index d39ad10875a..00000000000 --- a/consensus/ssz/Cargo.toml +++ /dev/null @@ -1,21 +0,0 @@ -[package] -name = "eth2_ssz" -version = "0.4.1" -authors = ["Paul Hauner "] -edition = "2021" -description = "SimpleSerialize (SSZ) as used in Ethereum 2.0" -license = "Apache-2.0" - -[lib] -name = "ssz" - -[dev-dependencies] -eth2_ssz_derive = "0.3.1" - -[dependencies] -ethereum-types = "0.14.1" -smallvec = { version = "1.6.1", features = ["const_generics"] } -itertools = "0.10.3" - -[features] -arbitrary = ["ethereum-types/arbitrary"] diff --git a/consensus/ssz/README.md b/consensus/ssz/README.md deleted file mode 100644 index 04603cda33c..00000000000 --- a/consensus/ssz/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# simpleserialize (ssz) - -[](https://crates.io/crates/eth2_ssz) diff --git a/consensus/ssz/examples/large_list.rs b/consensus/ssz/examples/large_list.rs deleted file mode 100644 index a1b10ab7a3e..00000000000 --- a/consensus/ssz/examples/large_list.rs +++ /dev/null @@ -1,15 +0,0 @@ -//! Encode and decode a list many times. -//! -//! Useful for `cargo flamegraph`. 
- -use ssz::{Decode, Encode}; - -fn main() { - let vec: Vec = vec![4242; 8196]; - - let output: Vec> = (0..40_000) - .map(|_| Vec::from_ssz_bytes(&vec.as_ssz_bytes()).unwrap()) - .collect(); - - println!("{}", output.len()); -} diff --git a/consensus/ssz/examples/large_list_of_structs.rs b/consensus/ssz/examples/large_list_of_structs.rs deleted file mode 100644 index 2aaaf9b8a53..00000000000 --- a/consensus/ssz/examples/large_list_of_structs.rs +++ /dev/null @@ -1,31 +0,0 @@ -//! Encode and decode a list many times. -//! -//! Useful for `cargo flamegraph`. - -use ssz::{Decode, Encode}; -use ssz_derive::{Decode, Encode}; - -#[derive(Clone, Copy, Encode, Decode)] -pub struct FixedLen { - a: u64, - b: u64, - c: u64, - d: u64, -} - -fn main() { - let fixed_len = FixedLen { - a: 42, - b: 42, - c: 42, - d: 42, - }; - - let vec: Vec = vec![fixed_len; 8196]; - - let output: Vec> = (0..40_000) - .map(|_| Vec::from_ssz_bytes(&vec.as_ssz_bytes()).unwrap()) - .collect(); - - println!("{}", output.len()); -} diff --git a/consensus/ssz/examples/struct_definition.rs b/consensus/ssz/examples/struct_definition.rs deleted file mode 100644 index 123da12c58c..00000000000 --- a/consensus/ssz/examples/struct_definition.rs +++ /dev/null @@ -1,73 +0,0 @@ -use ssz::{Decode, DecodeError, Encode, SszDecoderBuilder, SszEncoder}; - -#[derive(Debug, PartialEq)] -pub struct Foo { - a: u16, - b: Vec, - c: u16, -} - -impl Encode for Foo { - fn is_ssz_fixed_len() -> bool { - ::is_ssz_fixed_len() && as Encode>::is_ssz_fixed_len() - } - - fn ssz_bytes_len(&self) -> usize { - ::ssz_fixed_len() - + ssz::BYTES_PER_LENGTH_OFFSET - + ::ssz_fixed_len() - + self.b.ssz_bytes_len() - } - - fn ssz_append(&self, buf: &mut Vec) { - let offset = ::ssz_fixed_len() - + as Encode>::ssz_fixed_len() - + ::ssz_fixed_len(); - - let mut encoder = SszEncoder::container(buf, offset); - - encoder.append(&self.a); - encoder.append(&self.b); - encoder.append(&self.c); - - encoder.finalize(); - } -} - -impl Decode for Foo { - 
fn is_ssz_fixed_len() -> bool { - ::is_ssz_fixed_len() && as Decode>::is_ssz_fixed_len() - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - let mut builder = SszDecoderBuilder::new(bytes); - - builder.register_type::()?; - builder.register_type::>()?; - builder.register_type::()?; - - let mut decoder = builder.build()?; - - Ok(Self { - a: decoder.decode_next()?, - b: decoder.decode_next()?, - c: decoder.decode_next()?, - }) - } -} - -fn main() { - let my_foo = Foo { - a: 42, - b: vec![0, 1, 2, 3], - c: 11, - }; - - let bytes = vec![42, 0, 8, 0, 0, 0, 11, 0, 0, 1, 2, 3]; - - assert_eq!(my_foo.as_ssz_bytes(), bytes); - - let decoded_foo = Foo::from_ssz_bytes(&bytes).unwrap(); - - assert_eq!(my_foo, decoded_foo); -} diff --git a/consensus/ssz/src/decode.rs b/consensus/ssz/src/decode.rs deleted file mode 100644 index 10b3573b169..00000000000 --- a/consensus/ssz/src/decode.rs +++ /dev/null @@ -1,374 +0,0 @@ -use super::*; -use smallvec::{smallvec, SmallVec}; -use std::cmp::Ordering; - -type SmallVec8 = SmallVec<[T; 8]>; - -pub mod impls; -pub mod try_from_iter; - -/// Returned when SSZ decoding fails. -#[derive(Debug, PartialEq, Clone)] -pub enum DecodeError { - /// The bytes supplied were too short to be decoded into the specified type. - InvalidByteLength { len: usize, expected: usize }, - /// The given bytes were too short to be read as a length prefix. - InvalidLengthPrefix { len: usize, expected: usize }, - /// A length offset pointed to a byte that was out-of-bounds (OOB). - /// - /// A bytes may be OOB for the following reasons: - /// - /// - It is `>= bytes.len()`. - /// - When decoding variable length items, the 1st offset points "backwards" into the fixed - /// length items (i.e., `length[0] < BYTES_PER_LENGTH_OFFSET`). - /// - When decoding variable-length items, the `n`'th offset was less than the `n-1`'th offset. 
- OutOfBoundsByte { i: usize }, - /// An offset points “backwards” into the fixed-bytes portion of the message, essentially - /// double-decoding bytes that will also be decoded as fixed-length. - /// - /// https://notes.ethereum.org/ruKvDXl6QOW3gnqVYb8ezA?view#1-Offset-into-fixed-portion - OffsetIntoFixedPortion(usize), - /// The first offset does not point to the byte that follows the fixed byte portion, - /// essentially skipping a variable-length byte. - /// - /// https://notes.ethereum.org/ruKvDXl6QOW3gnqVYb8ezA?view#2-Skip-first-variable-byte - OffsetSkipsVariableBytes(usize), - /// An offset points to bytes prior to the previous offset. Depending on how you look at it, - /// this either double-decodes bytes or makes the first offset a negative-length. - /// - /// https://notes.ethereum.org/ruKvDXl6QOW3gnqVYb8ezA?view#3-Offsets-are-decreasing - OffsetsAreDecreasing(usize), - /// An offset references byte indices that do not exist in the source bytes. - /// - /// https://notes.ethereum.org/ruKvDXl6QOW3gnqVYb8ezA?view#4-Offsets-are-out-of-bounds - OffsetOutOfBounds(usize), - /// A variable-length list does not have a fixed portion that is cleanly divisible by - /// `BYTES_PER_LENGTH_OFFSET`. - InvalidListFixedBytesLen(usize), - /// Some item has a `ssz_fixed_len` of zero. This is illegal. - ZeroLengthItem, - /// The given bytes were invalid for some application-level reason. - BytesInvalid(String), - /// The given union selector is out of bounds. - UnionSelectorInvalid(u8), -} - -/// Performs checks on the `offset` based upon the other parameters provided. -/// -/// ## Detail -/// -/// - `offset`: the offset bytes (e.g., result of `read_offset(..)`). -/// - `previous_offset`: unless this is the first offset in the SSZ object, the value of the -/// previously-read offset. Used to ensure offsets are not decreasing. -/// - `num_bytes`: the total number of bytes in the SSZ object. Used to ensure the offset is not -/// out of bounds. 
-/// - `num_fixed_bytes`: the number of fixed-bytes in the struct, if it is known. Used to ensure -/// that the first offset doesn't skip any variable bytes. -/// -/// ## References -/// -/// The checks here are derived from this document: -/// -/// https://notes.ethereum.org/ruKvDXl6QOW3gnqVYb8ezA?view -pub fn sanitize_offset( - offset: usize, - previous_offset: Option, - num_bytes: usize, - num_fixed_bytes: Option, -) -> Result { - if num_fixed_bytes.map_or(false, |fixed_bytes| offset < fixed_bytes) { - Err(DecodeError::OffsetIntoFixedPortion(offset)) - } else if previous_offset.is_none() - && num_fixed_bytes.map_or(false, |fixed_bytes| offset != fixed_bytes) - { - Err(DecodeError::OffsetSkipsVariableBytes(offset)) - } else if offset > num_bytes { - Err(DecodeError::OffsetOutOfBounds(offset)) - } else if previous_offset.map_or(false, |prev| prev > offset) { - Err(DecodeError::OffsetsAreDecreasing(offset)) - } else { - Ok(offset) - } -} - -/// Provides SSZ decoding (de-serialization) via the `from_ssz_bytes(&bytes)` method. -/// -/// See `examples/` for manual implementations or the crate root for implementations using -/// `#[derive(Decode)]`. -pub trait Decode: Sized { - /// Returns `true` if this object has a fixed-length. - /// - /// I.e., there are no variable length items in this object or any of it's contained objects. - fn is_ssz_fixed_len() -> bool; - - /// The number of bytes this object occupies in the fixed-length portion of the SSZ bytes. - /// - /// By default, this is set to `BYTES_PER_LENGTH_OFFSET` which is suitable for variable length - /// objects, but not fixed-length objects. Fixed-length objects _must_ return a value which - /// represents their length. - fn ssz_fixed_len() -> usize { - BYTES_PER_LENGTH_OFFSET - } - - /// Attempts to decode `Self` from `bytes`, returning a `DecodeError` on failure. - /// - /// The supplied bytes must be the exact length required to decode `Self`, excess bytes will - /// result in an error. 
- fn from_ssz_bytes(bytes: &[u8]) -> Result; -} - -#[derive(Copy, Clone, Debug)] -pub struct Offset { - position: usize, - offset: usize, -} - -/// Builds an `SszDecoder`. -/// -/// The purpose of this struct is to split some SSZ bytes into individual slices. The builder is -/// then converted into a `SszDecoder` which decodes those values into object instances. -/// -/// See [`SszDecoder`](struct.SszDecoder.html) for usage examples. -pub struct SszDecoderBuilder<'a> { - bytes: &'a [u8], - items: SmallVec8<&'a [u8]>, - offsets: SmallVec8, - items_index: usize, -} - -impl<'a> SszDecoderBuilder<'a> { - /// Instantiate a new builder that should build a `SszDecoder` over the given `bytes` which - /// are assumed to be the SSZ encoding of some object. - pub fn new(bytes: &'a [u8]) -> Self { - Self { - bytes, - items: smallvec![], - offsets: smallvec![], - items_index: 0, - } - } - - /// Registers a variable-length object as the next item in `bytes`, without specifying the - /// actual type. - /// - /// ## Notes - /// - /// Use of this function is generally discouraged since it cannot detect if some type changes - /// from variable to fixed length. - /// - /// Use `Self::register_type` wherever possible. - pub fn register_anonymous_variable_length_item(&mut self) -> Result<(), DecodeError> { - struct Anonymous; - - impl Decode for Anonymous { - fn is_ssz_fixed_len() -> bool { - false - } - - fn from_ssz_bytes(_bytes: &[u8]) -> Result { - unreachable!("Anonymous should never be decoded") - } - } - - self.register_type::() - } - - /// Declares that some type `T` is the next item in `bytes`. - pub fn register_type(&mut self) -> Result<(), DecodeError> { - self.register_type_parameterized(T::is_ssz_fixed_len(), T::ssz_fixed_len()) - } - - /// Declares that a type with the given parameters is the next item in `bytes`. 
- pub fn register_type_parameterized( - &mut self, - is_ssz_fixed_len: bool, - ssz_fixed_len: usize, - ) -> Result<(), DecodeError> { - if is_ssz_fixed_len { - let start = self.items_index; - self.items_index += ssz_fixed_len; - - let slice = - self.bytes - .get(start..self.items_index) - .ok_or(DecodeError::InvalidByteLength { - len: self.bytes.len(), - expected: self.items_index, - })?; - - self.items.push(slice); - } else { - self.offsets.push(Offset { - position: self.items.len(), - offset: sanitize_offset( - read_offset(&self.bytes[self.items_index..])?, - self.offsets.last().map(|o| o.offset), - self.bytes.len(), - None, - )?, - }); - - // Push an empty slice into items; it will be replaced later. - self.items.push(&[]); - - self.items_index += BYTES_PER_LENGTH_OFFSET; - } - - Ok(()) - } - - fn finalize(&mut self) -> Result<(), DecodeError> { - if let Some(first_offset) = self.offsets.first().map(|o| o.offset) { - // Check to ensure the first offset points to the byte immediately following the - // fixed-length bytes. - match first_offset.cmp(&self.items_index) { - Ordering::Less => return Err(DecodeError::OffsetIntoFixedPortion(first_offset)), - Ordering::Greater => { - return Err(DecodeError::OffsetSkipsVariableBytes(first_offset)) - } - Ordering::Equal => (), - } - - // Iterate through each pair of offsets, grabbing the slice between each of the offsets. - for pair in self.offsets.windows(2) { - let a = pair[0]; - let b = pair[1]; - - self.items[a.position] = &self.bytes[a.offset..b.offset]; - } - - // Handle the last offset, pushing a slice from it's start through to the end of - // `self.bytes`. - if let Some(last) = self.offsets.last() { - self.items[last.position] = &self.bytes[last.offset..] - } - } else { - // If the container is fixed-length, ensure there are no excess bytes. 
- if self.items_index != self.bytes.len() { - return Err(DecodeError::InvalidByteLength { - len: self.bytes.len(), - expected: self.items_index, - }); - } - } - - Ok(()) - } - - /// Finalizes the builder, returning a `SszDecoder` that may be used to instantiate objects. - pub fn build(mut self) -> Result, DecodeError> { - self.finalize()?; - - Ok(SszDecoder { items: self.items }) - } -} - -/// Decodes some slices of SSZ into object instances. Should be instantiated using -/// [`SszDecoderBuilder`](struct.SszDecoderBuilder.html). -/// -/// ## Example -/// -/// ```rust -/// use ssz_derive::{Encode, Decode}; -/// use ssz::{Decode, Encode, SszDecoder, SszDecoderBuilder}; -/// -/// #[derive(PartialEq, Debug, Encode, Decode)] -/// struct Foo { -/// a: u64, -/// b: Vec, -/// } -/// -/// fn ssz_decoding_example() { -/// let foo = Foo { -/// a: 42, -/// b: vec![1, 3, 3, 7] -/// }; -/// -/// let bytes = foo.as_ssz_bytes(); -/// -/// let mut builder = SszDecoderBuilder::new(&bytes); -/// -/// builder.register_type::().unwrap(); -/// builder.register_type::>().unwrap(); -/// -/// let mut decoder = builder.build().unwrap(); -/// -/// let decoded_foo = Foo { -/// a: decoder.decode_next().unwrap(), -/// b: decoder.decode_next().unwrap(), -/// }; -/// -/// assert_eq!(foo, decoded_foo); -/// } -/// -/// ``` -pub struct SszDecoder<'a> { - items: SmallVec8<&'a [u8]>, -} - -impl<'a> SszDecoder<'a> { - /// Decodes the next item. - /// - /// # Panics - /// - /// Panics when attempting to decode more items than actually exist. - pub fn decode_next(&mut self) -> Result { - self.decode_next_with(|slice| T::from_ssz_bytes(slice)) - } - - /// Decodes the next item using the provided function. - pub fn decode_next_with(&mut self, f: F) -> Result - where - F: FnOnce(&'a [u8]) -> Result, - { - f(self.items.remove(0)) - } -} - -/// Takes `bytes`, assuming it is the encoding for a SSZ union, and returns the union-selector and -/// the body (trailing bytes). 
-/// -/// ## Errors -/// -/// Returns an error if: -/// -/// - `bytes` is empty. -/// - the union selector is not a valid value (i.e., larger than the maximum number of variants. -pub fn split_union_bytes(bytes: &[u8]) -> Result<(UnionSelector, &[u8]), DecodeError> { - let selector = bytes - .first() - .copied() - .ok_or(DecodeError::OutOfBoundsByte { i: 0 }) - .and_then(UnionSelector::new)?; - let body = bytes - .get(1..) - .ok_or(DecodeError::OutOfBoundsByte { i: 1 })?; - Ok((selector, body)) -} - -/// Reads a `BYTES_PER_LENGTH_OFFSET`-byte length from `bytes`, where `bytes.len() >= -/// BYTES_PER_LENGTH_OFFSET`. -pub fn read_offset(bytes: &[u8]) -> Result { - decode_offset(bytes.get(0..BYTES_PER_LENGTH_OFFSET).ok_or( - DecodeError::InvalidLengthPrefix { - len: bytes.len(), - expected: BYTES_PER_LENGTH_OFFSET, - }, - )?) -} - -/// Decode bytes as a little-endian usize, returning an `Err` if `bytes.len() != -/// BYTES_PER_LENGTH_OFFSET`. -fn decode_offset(bytes: &[u8]) -> Result { - let len = bytes.len(); - let expected = BYTES_PER_LENGTH_OFFSET; - - if len != expected { - Err(DecodeError::InvalidLengthPrefix { len, expected }) - } else { - let mut array: [u8; BYTES_PER_LENGTH_OFFSET] = std::default::Default::default(); - array.clone_from_slice(bytes); - - Ok(u32::from_le_bytes(array) as usize) - } -} diff --git a/consensus/ssz/src/decode/try_from_iter.rs b/consensus/ssz/src/decode/try_from_iter.rs deleted file mode 100644 index 1ff89a107f4..00000000000 --- a/consensus/ssz/src/decode/try_from_iter.rs +++ /dev/null @@ -1,103 +0,0 @@ -use smallvec::SmallVec; -use std::collections::{BTreeMap, BTreeSet}; -use std::convert::Infallible; -use std::fmt::Debug; - -/// Partial variant of `std::iter::FromIterator`. -/// -/// This trait is implemented for types which can be constructed from an iterator of decoded SSZ -/// values, but which may refuse values once a length limit is reached. 
-pub trait TryFromIter: Sized { - type Error: Debug; - - fn try_from_iter(iter: I) -> Result - where - I: IntoIterator; -} - -// It would be nice to be able to do a blanket impl, e.g. -// -// `impl TryFromIter for C where C: FromIterator` -// -// However this runs into trait coherence issues due to the type parameter `T` on `TryFromIter`. -// -// E.g. If we added an impl downstream for `List` then another crate downstream of that -// could legally add an impl of `FromIterator for List` which would create -// two conflicting implementations for `List`. Hence the `List` impl is disallowed -// by the compiler in the presence of the blanket impl. That's obviously annoying, so we opt to -// abandon the blanket impl in favour of impls for selected types. -impl TryFromIter for Vec { - type Error = Infallible; - - fn try_from_iter(values: I) -> Result - where - I: IntoIterator, - { - // Pre-allocate the expected size of the Vec, which is parsed from the SSZ input bytes as - // `num_items`. This length has already been checked to be less than or equal to the type's - // maximum length in `decode_list_of_variable_length_items`. - let iter = values.into_iter(); - let (_, opt_max_len) = iter.size_hint(); - let mut vec = Vec::with_capacity(opt_max_len.unwrap_or(0)); - vec.extend(iter); - Ok(vec) - } -} - -impl TryFromIter for SmallVec<[T; N]> { - type Error = Infallible; - - fn try_from_iter(iter: I) -> Result - where - I: IntoIterator, - { - Ok(Self::from_iter(iter)) - } -} - -impl TryFromIter<(K, V)> for BTreeMap -where - K: Ord, -{ - type Error = Infallible; - - fn try_from_iter(iter: I) -> Result - where - I: IntoIterator, - { - Ok(Self::from_iter(iter)) - } -} - -impl TryFromIter for BTreeSet -where - T: Ord, -{ - type Error = Infallible; - - fn try_from_iter(iter: I) -> Result - where - I: IntoIterator, - { - Ok(Self::from_iter(iter)) - } -} - -/// Partial variant of `collect`. 
-pub trait TryCollect: Iterator { - fn try_collect(self) -> Result - where - C: TryFromIter; -} - -impl TryCollect for I -where - I: Iterator, -{ - fn try_collect(self) -> Result - where - C: TryFromIter, - { - C::try_from_iter(self) - } -} diff --git a/consensus/ssz/src/encode.rs b/consensus/ssz/src/encode.rs deleted file mode 100644 index a46ef80e05c..00000000000 --- a/consensus/ssz/src/encode.rs +++ /dev/null @@ -1,196 +0,0 @@ -use super::*; - -mod impls; - -/// Provides SSZ encoding (serialization) via the `as_ssz_bytes(&self)` method. -/// -/// See `examples/` for manual implementations or the crate root for implementations using -/// `#[derive(Encode)]`. -pub trait Encode { - /// Returns `true` if this object has a fixed-length. - /// - /// I.e., there are no variable length items in this object or any of it's contained objects. - fn is_ssz_fixed_len() -> bool; - - /// Append the encoding `self` to `buf`. - /// - /// Note, variable length objects need only to append their "variable length" portion, they do - /// not need to provide their offset. - fn ssz_append(&self, buf: &mut Vec); - - /// The number of bytes this object occupies in the fixed-length portion of the SSZ bytes. - /// - /// By default, this is set to `BYTES_PER_LENGTH_OFFSET` which is suitable for variable length - /// objects, but not fixed-length objects. Fixed-length objects _must_ return a value which - /// represents their length. - fn ssz_fixed_len() -> usize { - BYTES_PER_LENGTH_OFFSET - } - - /// Returns the size (in bytes) when `self` is serialized. - /// - /// Returns the same value as `self.as_ssz_bytes().len()` but this method is significantly more - /// efficient. - fn ssz_bytes_len(&self) -> usize; - - /// Returns the full-form encoding of this object. - /// - /// The default implementation of this method should suffice for most cases. 
- fn as_ssz_bytes(&self) -> Vec { - let mut buf = vec![]; - - self.ssz_append(&mut buf); - - buf - } -} - -/// Allow for encoding an ordered series of distinct or indistinct objects as SSZ bytes. -/// -/// **You must call `finalize(..)` after the final `append(..)` call** to ensure the bytes are -/// written to `buf`. -/// -/// ## Example -/// -/// Use `SszEncoder` to produce identical output to `foo.as_ssz_bytes()`: -/// -/// ```rust -/// use ssz_derive::{Encode, Decode}; -/// use ssz::{Decode, Encode, SszEncoder}; -/// -/// #[derive(PartialEq, Debug, Encode, Decode)] -/// struct Foo { -/// a: u64, -/// b: Vec, -/// } -/// -/// fn ssz_encode_example() { -/// let foo = Foo { -/// a: 42, -/// b: vec![1, 3, 3, 7] -/// }; -/// -/// let mut buf: Vec = vec![]; -/// let offset = ::ssz_fixed_len() + as Encode>::ssz_fixed_len(); -/// -/// let mut encoder = SszEncoder::container(&mut buf, offset); -/// -/// encoder.append(&foo.a); -/// encoder.append(&foo.b); -/// -/// encoder.finalize(); -/// -/// assert_eq!(foo.as_ssz_bytes(), buf); -/// } -/// -/// ``` -pub struct SszEncoder<'a> { - offset: usize, - buf: &'a mut Vec, - variable_bytes: Vec, -} - -impl<'a> SszEncoder<'a> { - /// Instantiate a new encoder for encoding a SSZ container. - pub fn container(buf: &'a mut Vec, num_fixed_bytes: usize) -> Self { - buf.reserve(num_fixed_bytes); - - Self { - offset: num_fixed_bytes, - buf, - variable_bytes: vec![], - } - } - - /// Append some `item` to the SSZ bytes. - pub fn append(&mut self, item: &T) { - self.append_parameterized(T::is_ssz_fixed_len(), |buf| item.ssz_append(buf)) - } - - /// Uses `ssz_append` to append the encoding of some item to the SSZ bytes. 
- pub fn append_parameterized(&mut self, is_ssz_fixed_len: bool, ssz_append: F) - where - F: Fn(&mut Vec), - { - if is_ssz_fixed_len { - ssz_append(self.buf); - } else { - self.buf - .extend_from_slice(&encode_length(self.offset + self.variable_bytes.len())); - - ssz_append(&mut self.variable_bytes); - } - } - - /// Write the variable bytes to `self.bytes`. - /// - /// This method must be called after the final `append(..)` call when serializing - /// variable-length items. - pub fn finalize(&mut self) -> &mut Vec { - self.buf.append(&mut self.variable_bytes); - - self.buf - } -} - -/// Encode `len` as a little-endian byte array of `BYTES_PER_LENGTH_OFFSET` length. -/// -/// If `len` is larger than `2 ^ BYTES_PER_LENGTH_OFFSET`, a `debug_assert` is raised. -pub fn encode_length(len: usize) -> [u8; BYTES_PER_LENGTH_OFFSET] { - // Note: it is possible for `len` to be larger than what can be encoded in - // `BYTES_PER_LENGTH_OFFSET` bytes, triggering this debug assertion. - // - // These are the alternatives to using a `debug_assert` here: - // - // 1. Use `assert`. - // 2. Push an error to the caller (e.g., `Option` or `Result`). - // 3. Ignore it completely. - // - // I have avoided (1) because it's basically a choice between "produce invalid SSZ" or "kill - // the entire program". I figure it may be possible for an attacker to trigger this assert and - // take the program down -- I think producing invalid SSZ is a better option than this. - // - // I have avoided (2) because this error will need to be propagated upstream, making encoding a - // function which may fail. I don't think this is ergonomic and the upsides don't outweigh the - // downsides. - // - // I figure a `debug_assertion` is better than (3) as it will give us a change to detect the - // error during testing. - // - // If you have a different opinion, feel free to start an issue and tag @paulhauner. 
- debug_assert!(len <= MAX_LENGTH_VALUE); - - let mut bytes = [0; BYTES_PER_LENGTH_OFFSET]; - bytes.copy_from_slice(&len.to_le_bytes()[0..BYTES_PER_LENGTH_OFFSET]); - bytes -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_encode_length() { - assert_eq!(encode_length(0), [0; 4]); - - assert_eq!(encode_length(1), [1, 0, 0, 0]); - - assert_eq!( - encode_length(MAX_LENGTH_VALUE), - [255; BYTES_PER_LENGTH_OFFSET] - ); - } - - #[test] - #[should_panic] - #[cfg(debug_assertions)] - fn test_encode_length_above_max_debug_panics() { - encode_length(MAX_LENGTH_VALUE + 1); - } - - #[test] - #[cfg(not(debug_assertions))] - fn test_encode_length_above_max_not_debug_does_not_panic() { - assert_eq!(&encode_length(MAX_LENGTH_VALUE + 1)[..], &[0; 4]); - } -} diff --git a/consensus/ssz/src/encode/impls.rs b/consensus/ssz/src/encode/impls.rs deleted file mode 100644 index 8c609d93976..00000000000 --- a/consensus/ssz/src/encode/impls.rs +++ /dev/null @@ -1,633 +0,0 @@ -use super::*; -use core::num::NonZeroUsize; -use ethereum_types::{H160, H256, U128, U256}; -use smallvec::SmallVec; -use std::collections::{BTreeMap, BTreeSet}; -use std::sync::Arc; - -macro_rules! impl_encodable_for_uint { - ($type: ident, $bit_size: expr) => { - impl Encode for $type { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - $bit_size / 8 - } - - fn ssz_bytes_len(&self) -> usize { - $bit_size / 8 - } - - fn ssz_append(&self, buf: &mut Vec) { - buf.extend_from_slice(&self.to_le_bytes()); - } - } - }; -} - -impl_encodable_for_uint!(u8, 8); -impl_encodable_for_uint!(u16, 16); -impl_encodable_for_uint!(u32, 32); -impl_encodable_for_uint!(u64, 64); - -#[cfg(target_pointer_width = "32")] -impl_encodable_for_uint!(usize, 32); - -#[cfg(target_pointer_width = "64")] -impl_encodable_for_uint!(usize, 64); - -// Based on the `tuple_impls` macro from the standard library. -macro_rules! 
impl_encode_for_tuples { - ($( - $Tuple:ident { - $(($idx:tt) -> $T:ident)+ - } - )+) => { - $( - impl<$($T: Encode),+> Encode for ($($T,)+) { - fn is_ssz_fixed_len() -> bool { - $( - <$T as Encode>::is_ssz_fixed_len() && - )* - true - } - - fn ssz_fixed_len() -> usize { - if ::is_ssz_fixed_len() { - $( - <$T as Encode>::ssz_fixed_len() + - )* - 0 - } else { - BYTES_PER_LENGTH_OFFSET - } - } - - fn ssz_bytes_len(&self) -> usize { - if ::is_ssz_fixed_len() { - ::ssz_fixed_len() - } else { - let mut len = 0; - $( - len += if <$T as Encode>::is_ssz_fixed_len() { - <$T as Encode>::ssz_fixed_len() - } else { - BYTES_PER_LENGTH_OFFSET + - self.$idx.ssz_bytes_len() - }; - )* - len - } - } - - fn ssz_append(&self, buf: &mut Vec) { - let offset = $( - <$T as Encode>::ssz_fixed_len() + - )* - 0; - - let mut encoder = SszEncoder::container(buf, offset); - - $( - encoder.append(&self.$idx); - )* - - encoder.finalize(); - } - } - )+ - } -} - -impl_encode_for_tuples! { - Tuple2 { - (0) -> A - (1) -> B - } - Tuple3 { - (0) -> A - (1) -> B - (2) -> C - } - Tuple4 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - } - Tuple5 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - } - Tuple6 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - } - Tuple7 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - (6) -> G - } - Tuple8 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - (6) -> G - (7) -> H - } - Tuple9 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - (6) -> G - (7) -> H - (8) -> I - } - Tuple10 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - (6) -> G - (7) -> H - (8) -> I - (9) -> J - } - Tuple11 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - (6) -> G - (7) -> H - (8) -> I - (9) -> J - (10) -> K - } - Tuple12 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - (6) -> G - (7) -> H - (8) -> I - (9) -> J - (10) -> K - (11) -> L - } -} - -impl 
Encode for Option { - fn is_ssz_fixed_len() -> bool { - false - } - fn ssz_append(&self, buf: &mut Vec) { - match self { - Option::None => { - let union_selector: u8 = 0u8; - buf.push(union_selector); - } - Option::Some(ref inner) => { - let union_selector: u8 = 1u8; - buf.push(union_selector); - inner.ssz_append(buf); - } - } - } - fn ssz_bytes_len(&self) -> usize { - match self { - Option::None => 1usize, - Option::Some(ref inner) => inner - .ssz_bytes_len() - .checked_add(1) - .expect("encoded length must be less than usize::max_value"), - } - } -} - -impl Encode for Arc { - fn is_ssz_fixed_len() -> bool { - T::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - T::ssz_fixed_len() - } - - fn ssz_append(&self, buf: &mut Vec) { - self.as_ref().ssz_append(buf) - } - - fn ssz_bytes_len(&self) -> usize { - self.as_ref().ssz_bytes_len() - } -} - -// Encode transparently through references. -impl<'a, T: Encode> Encode for &'a T { - fn is_ssz_fixed_len() -> bool { - T::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - T::ssz_fixed_len() - } - - fn ssz_append(&self, buf: &mut Vec) { - T::ssz_append(self, buf) - } - - fn ssz_bytes_len(&self) -> usize { - T::ssz_bytes_len(self) - } -} - -/// Compute the encoded length of a vector-like sequence of `T`. -pub fn sequence_ssz_bytes_len(iter: I) -> usize -where - I: Iterator + ExactSizeIterator, - T: Encode, -{ - // Compute length before doing any iteration. - let length = iter.len(); - if ::is_ssz_fixed_len() { - ::ssz_fixed_len() * length - } else { - let mut len = iter.map(|item| item.ssz_bytes_len()).sum(); - len += BYTES_PER_LENGTH_OFFSET * length; - len - } -} - -/// Encode a vector-like sequence of `T`. 
-pub fn sequence_ssz_append(iter: I, buf: &mut Vec) -where - I: Iterator + ExactSizeIterator, - T: Encode, -{ - if T::is_ssz_fixed_len() { - buf.reserve(T::ssz_fixed_len() * iter.len()); - - for item in iter { - item.ssz_append(buf); - } - } else { - let mut encoder = SszEncoder::container(buf, iter.len() * BYTES_PER_LENGTH_OFFSET); - - for item in iter { - encoder.append(&item); - } - - encoder.finalize(); - } -} - -macro_rules! impl_for_vec { - ($type: ty) => { - impl Encode for $type { - fn is_ssz_fixed_len() -> bool { - false - } - - fn ssz_bytes_len(&self) -> usize { - sequence_ssz_bytes_len(self.iter()) - } - - fn ssz_append(&self, buf: &mut Vec) { - sequence_ssz_append(self.iter(), buf) - } - } - }; -} - -impl_for_vec!(Vec); -impl_for_vec!(SmallVec<[T; 1]>); -impl_for_vec!(SmallVec<[T; 2]>); -impl_for_vec!(SmallVec<[T; 3]>); -impl_for_vec!(SmallVec<[T; 4]>); -impl_for_vec!(SmallVec<[T; 5]>); -impl_for_vec!(SmallVec<[T; 6]>); -impl_for_vec!(SmallVec<[T; 7]>); -impl_for_vec!(SmallVec<[T; 8]>); - -impl Encode for BTreeMap -where - K: Encode + Ord, - V: Encode, -{ - fn is_ssz_fixed_len() -> bool { - false - } - - fn ssz_bytes_len(&self) -> usize { - sequence_ssz_bytes_len(self.iter()) - } - - fn ssz_append(&self, buf: &mut Vec) { - sequence_ssz_append(self.iter(), buf) - } -} - -impl Encode for BTreeSet -where - T: Encode + Ord, -{ - fn is_ssz_fixed_len() -> bool { - false - } - - fn ssz_bytes_len(&self) -> usize { - sequence_ssz_bytes_len(self.iter()) - } - - fn ssz_append(&self, buf: &mut Vec) { - sequence_ssz_append(self.iter(), buf) - } -} - -impl Encode for bool { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - 1 - } - - fn ssz_bytes_len(&self) -> usize { - 1 - } - - fn ssz_append(&self, buf: &mut Vec) { - buf.extend_from_slice(&(*self as u8).to_le_bytes()); - } -} - -impl Encode for NonZeroUsize { - fn is_ssz_fixed_len() -> bool { - ::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - ::ssz_fixed_len() - } - - fn 
ssz_bytes_len(&self) -> usize { - std::mem::size_of::() - } - - fn ssz_append(&self, buf: &mut Vec) { - self.get().ssz_append(buf) - } -} - -impl Encode for H160 { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - 20 - } - - fn ssz_bytes_len(&self) -> usize { - 20 - } - - fn ssz_append(&self, buf: &mut Vec) { - buf.extend_from_slice(self.as_bytes()); - } -} - -impl Encode for H256 { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - 32 - } - - fn ssz_bytes_len(&self) -> usize { - 32 - } - - fn ssz_append(&self, buf: &mut Vec) { - buf.extend_from_slice(self.as_bytes()); - } -} - -impl Encode for U256 { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - 32 - } - - fn ssz_bytes_len(&self) -> usize { - 32 - } - - fn ssz_append(&self, buf: &mut Vec) { - let n = ::ssz_fixed_len(); - let s = buf.len(); - - buf.resize(s + n, 0); - self.to_little_endian(&mut buf[s..]); - } -} - -impl Encode for U128 { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - 16 - } - - fn ssz_bytes_len(&self) -> usize { - 16 - } - - fn ssz_append(&self, buf: &mut Vec) { - let n = ::ssz_fixed_len(); - let s = buf.len(); - - buf.resize(s + n, 0); - self.to_little_endian(&mut buf[s..]); - } -} - -macro_rules! 
impl_encodable_for_u8_array { - ($len: expr) => { - impl Encode for [u8; $len] { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - $len - } - - fn ssz_bytes_len(&self) -> usize { - $len - } - - fn ssz_append(&self, buf: &mut Vec) { - buf.extend_from_slice(&self[..]); - } - } - }; -} - -impl_encodable_for_u8_array!(4); -impl_encodable_for_u8_array!(32); -impl_encodable_for_u8_array!(48); - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn vec_of_u8() { - let vec: Vec = vec![]; - assert_eq!(vec.as_ssz_bytes(), vec![]); - - let vec: Vec = vec![1]; - assert_eq!(vec.as_ssz_bytes(), vec![1]); - - let vec: Vec = vec![0, 1, 2, 3]; - assert_eq!(vec.as_ssz_bytes(), vec![0, 1, 2, 3]); - } - - #[test] - fn vec_of_vec_of_u8() { - let vec: Vec> = vec![]; - assert_eq!(vec.as_ssz_bytes(), vec![]); - - let vec: Vec> = vec![vec![]]; - assert_eq!(vec.as_ssz_bytes(), vec![4, 0, 0, 0]); - - let vec: Vec> = vec![vec![], vec![]]; - assert_eq!(vec.as_ssz_bytes(), vec![8, 0, 0, 0, 8, 0, 0, 0]); - - let vec: Vec> = vec![vec![0, 1, 2], vec![11, 22, 33]]; - assert_eq!( - vec.as_ssz_bytes(), - vec![8, 0, 0, 0, 11, 0, 0, 0, 0, 1, 2, 11, 22, 33] - ); - } - - #[test] - fn ssz_encode_u8() { - assert_eq!(0_u8.as_ssz_bytes(), vec![0]); - assert_eq!(1_u8.as_ssz_bytes(), vec![1]); - assert_eq!(100_u8.as_ssz_bytes(), vec![100]); - assert_eq!(255_u8.as_ssz_bytes(), vec![255]); - } - - #[test] - fn ssz_encode_u16() { - assert_eq!(1_u16.as_ssz_bytes(), vec![1, 0]); - assert_eq!(100_u16.as_ssz_bytes(), vec![100, 0]); - assert_eq!((1_u16 << 8).as_ssz_bytes(), vec![0, 1]); - assert_eq!(65535_u16.as_ssz_bytes(), vec![255, 255]); - } - - #[test] - fn ssz_encode_u32() { - assert_eq!(1_u32.as_ssz_bytes(), vec![1, 0, 0, 0]); - assert_eq!(100_u32.as_ssz_bytes(), vec![100, 0, 0, 0]); - assert_eq!((1_u32 << 16).as_ssz_bytes(), vec![0, 0, 1, 0]); - assert_eq!((1_u32 << 24).as_ssz_bytes(), vec![0, 0, 0, 1]); - assert_eq!((!0_u32).as_ssz_bytes(), vec![255, 255, 255, 255]); - } - 
- #[test] - fn ssz_encode_u64() { - assert_eq!(1_u64.as_ssz_bytes(), vec![1, 0, 0, 0, 0, 0, 0, 0]); - assert_eq!( - (!0_u64).as_ssz_bytes(), - vec![255, 255, 255, 255, 255, 255, 255, 255] - ); - } - - #[test] - fn ssz_encode_usize() { - assert_eq!(1_usize.as_ssz_bytes(), vec![1, 0, 0, 0, 0, 0, 0, 0]); - assert_eq!( - (!0_usize).as_ssz_bytes(), - vec![255, 255, 255, 255, 255, 255, 255, 255] - ); - } - - #[test] - fn ssz_encode_option_u8() { - let opt: Option = None; - assert_eq!(opt.as_ssz_bytes(), vec![0]); - let opt: Option = Some(2); - assert_eq!(opt.as_ssz_bytes(), vec![1, 2]); - } - - #[test] - fn ssz_encode_bool() { - assert_eq!(true.as_ssz_bytes(), vec![1]); - assert_eq!(false.as_ssz_bytes(), vec![0]); - } - - #[test] - fn ssz_encode_h256() { - assert_eq!(H256::from(&[0; 32]).as_ssz_bytes(), vec![0; 32]); - assert_eq!(H256::from(&[1; 32]).as_ssz_bytes(), vec![1; 32]); - - let bytes = vec![ - 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]; - - assert_eq!(H256::from_slice(&bytes).as_ssz_bytes(), bytes); - } - - #[test] - fn ssz_encode_u8_array_4() { - assert_eq!([0, 0, 0, 0].as_ssz_bytes(), vec![0; 4]); - assert_eq!([1, 0, 0, 0].as_ssz_bytes(), vec![1, 0, 0, 0]); - assert_eq!([1, 2, 3, 4].as_ssz_bytes(), vec![1, 2, 3, 4]); - } - - #[test] - fn tuple() { - assert_eq!((10u8, 11u8).as_ssz_bytes(), vec![10, 11]); - assert_eq!((10u32, 11u8).as_ssz_bytes(), vec![10, 0, 0, 0, 11]); - assert_eq!((10u8, 11u8, 12u8).as_ssz_bytes(), vec![10, 11, 12]); - } -} diff --git a/consensus/ssz/src/legacy.rs b/consensus/ssz/src/legacy.rs deleted file mode 100644 index 4953db057de..00000000000 --- a/consensus/ssz/src/legacy.rs +++ /dev/null @@ -1,265 +0,0 @@ -//! Provides a "legacy" version of SSZ encoding for `Option where T: Encode + Decode`. -//! -//! The SSZ specification changed in 2021 to use a 1-byte union selector, instead of a 4-byte one -//! which was used in the Lighthouse database. -//! -//! 
Users can use the `four_byte_option_impl` macro to define a module that can be used with the -//! `#[ssz(with = "module")]`. -//! -//! ## Example -//! -//! ```rust -//! use ssz_derive::{Encode, Decode}; -//! use ssz::four_byte_option_impl; -//! -//! four_byte_option_impl!(impl_for_u64, u64); -//! -//! #[derive(Encode, Decode)] -//! struct Foo { -//! #[ssz(with = "impl_for_u64")] -//! a: Option, -//! } -//! ``` - -use crate::*; - -#[macro_export] -macro_rules! four_byte_option_impl { - ($mod_name: ident, $type: ty) => { - #[allow(dead_code)] - mod $mod_name { - use super::*; - - pub mod encode { - use super::*; - #[allow(unused_imports)] - use ssz::*; - - pub fn is_ssz_fixed_len() -> bool { - false - } - - pub fn ssz_fixed_len() -> usize { - BYTES_PER_LENGTH_OFFSET - } - - pub fn ssz_bytes_len(opt: &Option<$type>) -> usize { - if let Some(some) = opt { - let len = if <$type as Encode>::is_ssz_fixed_len() { - <$type as Encode>::ssz_fixed_len() - } else { - <$type as Encode>::ssz_bytes_len(some) - }; - len + BYTES_PER_LENGTH_OFFSET - } else { - BYTES_PER_LENGTH_OFFSET - } - } - - pub fn ssz_append(opt: &Option<$type>, buf: &mut Vec) { - match opt { - None => buf.extend_from_slice(&legacy::encode_four_byte_union_selector(0)), - Some(t) => { - buf.extend_from_slice(&legacy::encode_four_byte_union_selector(1)); - t.ssz_append(buf); - } - } - } - - pub fn as_ssz_bytes(opt: &Option<$type>) -> Vec { - let mut buf = vec![]; - - ssz_append(opt, &mut buf); - - buf - } - } - - pub mod decode { - use super::*; - #[allow(unused_imports)] - use ssz::*; - - pub fn is_ssz_fixed_len() -> bool { - false - } - - pub fn ssz_fixed_len() -> usize { - BYTES_PER_LENGTH_OFFSET - } - - pub fn from_ssz_bytes(bytes: &[u8]) -> Result, DecodeError> { - if bytes.len() < BYTES_PER_LENGTH_OFFSET { - return Err(DecodeError::InvalidByteLength { - len: bytes.len(), - expected: BYTES_PER_LENGTH_OFFSET, - }); - } - - let (index_bytes, value_bytes) = bytes.split_at(BYTES_PER_LENGTH_OFFSET); - - let index 
= legacy::read_four_byte_union_selector(index_bytes)?; - if index == 0 { - Ok(None) - } else if index == 1 { - Ok(Some(<$type as ssz::Decode>::from_ssz_bytes(value_bytes)?)) - } else { - Err(DecodeError::BytesInvalid(format!( - "{} is not a valid union index for Option", - index - ))) - } - } - } - } - }; -} - -pub fn encode_four_byte_union_selector(selector: usize) -> [u8; BYTES_PER_LENGTH_OFFSET] { - encode_length(selector) -} - -pub fn read_four_byte_union_selector(bytes: &[u8]) -> Result { - read_offset(bytes) -} - -#[cfg(test)] -mod test { - use super::*; - use crate as ssz; - use ssz_derive::{Decode, Encode}; - - type VecU16 = Vec; - - four_byte_option_impl!(impl_u16, u16); - four_byte_option_impl!(impl_vec_u16, VecU16); - - #[test] - fn ssz_encode_option_u16() { - let item = Some(65535_u16); - let bytes = vec![1, 0, 0, 0, 255, 255]; - assert_eq!(impl_u16::encode::as_ssz_bytes(&item), bytes); - assert_eq!(impl_u16::decode::from_ssz_bytes(&bytes).unwrap(), item); - - let item = None; - let bytes = vec![0, 0, 0, 0]; - assert_eq!(impl_u16::encode::as_ssz_bytes(&item), bytes); - assert_eq!(impl_u16::decode::from_ssz_bytes(&bytes).unwrap(), None); - } - - #[test] - fn ssz_encode_option_vec_u16() { - let item = Some(vec![0_u16, 1]); - let bytes = vec![1, 0, 0, 0, 0, 0, 1, 0]; - assert_eq!(impl_vec_u16::encode::as_ssz_bytes(&item), bytes); - assert_eq!(impl_vec_u16::decode::from_ssz_bytes(&bytes).unwrap(), item); - - let item = None; - let bytes = vec![0, 0, 0, 0]; - assert_eq!(impl_vec_u16::encode::as_ssz_bytes(&item), bytes); - assert_eq!(impl_vec_u16::decode::from_ssz_bytes(&bytes).unwrap(), item); - } - - fn round_trip(items: Vec) { - for item in items { - let encoded = &item.as_ssz_bytes(); - assert_eq!(item.ssz_bytes_len(), encoded.len()); - assert_eq!(T::from_ssz_bytes(encoded), Ok(item)); - } - } - - #[derive(Debug, PartialEq, Encode, Decode)] - struct TwoVariableLenOptions { - a: u16, - #[ssz(with = "impl_u16")] - b: Option, - #[ssz(with = "impl_vec_u16")] 
- c: Option>, - #[ssz(with = "impl_vec_u16")] - d: Option>, - } - - #[test] - #[allow(clippy::zero_prefixed_literal)] - fn two_variable_len_options_encoding() { - let s = TwoVariableLenOptions { - a: 42, - b: None, - c: Some(vec![0]), - d: None, - }; - - let bytes = vec![ - // 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 - // | option | offset | offset | option = vec![ - TwoVariableLenOptions { - a: 42, - b: Some(12), - c: Some(vec![0]), - d: Some(vec![1]), - }, - TwoVariableLenOptions { - a: 42, - b: Some(12), - c: Some(vec![0]), - d: None, - }, - TwoVariableLenOptions { - a: 42, - b: None, - c: Some(vec![0]), - d: None, - }, - TwoVariableLenOptions { - a: 42, - b: None, - c: None, - d: None, - }, - ]; - - round_trip(vec); - } - - #[test] - fn tuple_u8_u16() { - let vec: Vec<(u8, u16)> = vec![ - (0, 0), - (0, 1), - (1, 0), - (u8::max_value(), u16::max_value()), - (0, u16::max_value()), - (u8::max_value(), 0), - (42, 12301), - ]; - - round_trip(vec); - } - - #[test] - fn tuple_vec_vec() { - let vec: Vec<(u64, Vec, Vec>)> = vec![ - (0, vec![], vec![vec![]]), - (99, vec![101], vec![vec![], vec![]]), - ( - 42, - vec![12, 13, 14], - vec![vec![99, 98, 97, 96], vec![42, 44, 46, 48, 50]], - ), - ]; - - round_trip(vec); - } -} diff --git a/consensus/ssz/src/lib.rs b/consensus/ssz/src/lib.rs deleted file mode 100644 index e71157a3eed..00000000000 --- a/consensus/ssz/src/lib.rs +++ /dev/null @@ -1,71 +0,0 @@ -//! Provides encoding (serialization) and decoding (deserialization) in the SimpleSerialize (SSZ) -//! format designed for use in Ethereum 2.0. -//! -//! Adheres to the Ethereum 2.0 [SSZ -//! specification](https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/ssz/simple-serialize.md) -//! at v0.12.1. -//! -//! ## Example -//! -//! ```rust -//! use ssz_derive::{Encode, Decode}; -//! use ssz::{Decode, Encode}; -//! -//! #[derive(PartialEq, Debug, Encode, Decode)] -//! struct Foo { -//! a: u64, -//! b: Vec, -//! } -//! -//! fn ssz_encode_decode_example() { -//! 
let foo = Foo { -//! a: 42, -//! b: vec![1, 3, 3, 7] -//! }; -//! -//! let ssz_bytes: Vec = foo.as_ssz_bytes(); -//! -//! let decoded_foo = Foo::from_ssz_bytes(&ssz_bytes).unwrap(); -//! -//! assert_eq!(foo, decoded_foo); -//! } -//! -//! ``` -//! -//! See `examples/` for manual implementations of the `Encode` and `Decode` traits. - -mod decode; -mod encode; -pub mod legacy; -mod union_selector; - -pub use decode::{ - impls::decode_list_of_variable_length_items, read_offset, split_union_bytes, - try_from_iter::TryFromIter, Decode, DecodeError, SszDecoder, SszDecoderBuilder, -}; -pub use encode::{encode_length, Encode, SszEncoder}; -pub use union_selector::UnionSelector; - -/// The number of bytes used to represent an offset. -pub const BYTES_PER_LENGTH_OFFSET: usize = 4; -/// The maximum value that can be represented using `BYTES_PER_LENGTH_OFFSET`. -#[cfg(target_pointer_width = "32")] -pub const MAX_LENGTH_VALUE: usize = (std::u32::MAX >> (8 * (4 - BYTES_PER_LENGTH_OFFSET))) as usize; -#[cfg(target_pointer_width = "64")] -pub const MAX_LENGTH_VALUE: usize = (std::u64::MAX >> (8 * (8 - BYTES_PER_LENGTH_OFFSET))) as usize; - -/// The number of bytes used to indicate the variant of a union. -pub const BYTES_PER_UNION_SELECTOR: usize = 1; -/// The highest possible union selector value (higher values are reserved for backwards compatible -/// extensions). -pub const MAX_UNION_SELECTOR: u8 = 127; - -/// Convenience function to SSZ encode an object supporting ssz::Encode. -/// -/// Equivalent to `val.as_ssz_bytes()`. 
-pub fn ssz_encode(val: &T) -> Vec -where - T: Encode, -{ - val.as_ssz_bytes() -} diff --git a/consensus/ssz/src/union_selector.rs b/consensus/ssz/src/union_selector.rs deleted file mode 100644 index 18bab094aab..00000000000 --- a/consensus/ssz/src/union_selector.rs +++ /dev/null @@ -1,29 +0,0 @@ -use crate::*; - -/// Provides the one-byte "selector" from the SSZ union specification: -/// -/// https://github.com/ethereum/consensus-specs/blob/v1.1.0-beta.3/ssz/simple-serialize.md#union -#[derive(Copy, Clone)] -pub struct UnionSelector(u8); - -impl From for u8 { - fn from(union_selector: UnionSelector) -> u8 { - union_selector.0 - } -} - -impl PartialEq for UnionSelector { - fn eq(&self, other: &u8) -> bool { - self.0 == *other - } -} - -impl UnionSelector { - /// Instantiate `self`, returning an error if `selector > MAX_UNION_SELECTOR`. - pub fn new(selector: u8) -> Result { - Some(selector) - .filter(|_| selector <= MAX_UNION_SELECTOR) - .map(Self) - .ok_or(DecodeError::UnionSelectorInvalid(selector)) - } -} diff --git a/consensus/ssz/tests/tests.rs b/consensus/ssz/tests/tests.rs deleted file mode 100644 index f52d2c5cdfe..00000000000 --- a/consensus/ssz/tests/tests.rs +++ /dev/null @@ -1,390 +0,0 @@ -use ethereum_types::H256; -use ssz::{Decode, DecodeError, Encode}; -use ssz_derive::{Decode, Encode}; - -mod round_trip { - use super::*; - use std::collections::BTreeMap; - use std::iter::FromIterator; - - fn round_trip(items: Vec) { - for item in items { - let encoded = &item.as_ssz_bytes(); - assert_eq!(item.ssz_bytes_len(), encoded.len()); - assert_eq!(T::from_ssz_bytes(encoded), Ok(item)); - } - } - - #[test] - fn bool() { - let items: Vec = vec![true, false]; - - round_trip(items); - } - - #[test] - fn option_u16() { - let items: Vec> = vec![None, Some(2u16)]; - - round_trip(items); - } - - #[test] - fn u8_array_4() { - let items: Vec<[u8; 4]> = vec![[0, 0, 0, 0], [1, 0, 0, 0], [1, 2, 3, 4], [1, 2, 0, 4]]; - - round_trip(items); - } - - #[test] - fn h256() { - 
let items: Vec = vec![H256::zero(), H256::from([1; 32]), H256::random()]; - - round_trip(items); - } - - #[test] - fn vec_of_h256() { - let items: Vec> = vec![ - vec![], - vec![H256::zero(), H256::from([1; 32]), H256::random()], - ]; - - round_trip(items); - } - - #[test] - fn option_vec_h256() { - let items: Vec>> = vec![ - None, - Some(vec![]), - Some(vec![H256::zero(), H256::from([1; 32]), H256::random()]), - ]; - - round_trip(items); - } - - #[test] - fn vec_u16() { - let items: Vec> = vec![ - vec![], - vec![255], - vec![0, 1, 2], - vec![100; 64], - vec![255, 0, 255], - ]; - - round_trip(items); - } - - #[test] - fn vec_of_vec_u16() { - let items: Vec>> = vec![ - vec![], - vec![vec![]], - vec![vec![1, 2, 3]], - vec![vec![], vec![]], - vec![vec![], vec![1, 2, 3]], - vec![vec![1, 2, 3], vec![1, 2, 3]], - vec![vec![1, 2, 3], vec![], vec![1, 2, 3]], - vec![vec![], vec![], vec![1, 2, 3]], - vec![vec![], vec![1], vec![1, 2, 3]], - vec![vec![], vec![1], vec![1, 2, 3]], - ]; - - round_trip(items); - } - - #[derive(Debug, PartialEq, Encode, Decode)] - struct FixedLen { - a: u16, - b: u64, - c: u32, - } - - #[test] - #[allow(clippy::zero_prefixed_literal)] - fn fixed_len_struct_encoding() { - let items: Vec = vec![ - FixedLen { a: 0, b: 0, c: 0 }, - FixedLen { a: 1, b: 1, c: 1 }, - FixedLen { a: 1, b: 0, c: 1 }, - ]; - - let expected_encodings = vec![ - // | u16--| u64----------------------------| u32----------| - vec![00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00], - vec![01, 00, 01, 00, 00, 00, 00, 00, 00, 00, 01, 00, 00, 00], - vec![01, 00, 00, 00, 00, 00, 00, 00, 00, 00, 01, 00, 00, 00], - ]; - - for i in 0..items.len() { - assert_eq!( - items[i].as_ssz_bytes(), - expected_encodings[i], - "Failed on {}", - i - ); - } - } - - #[test] - fn fixed_len_excess_bytes() { - let fixed = FixedLen { a: 1, b: 2, c: 3 }; - - let mut bytes = fixed.as_ssz_bytes(); - bytes.append(&mut vec![0]); - - assert_eq!( - FixedLen::from_ssz_bytes(&bytes), - 
Err(DecodeError::InvalidByteLength { - len: 15, - expected: 14, - }) - ); - } - - #[test] - fn vec_of_fixed_len_struct() { - let items: Vec = vec![ - FixedLen { a: 0, b: 0, c: 0 }, - FixedLen { a: 1, b: 1, c: 1 }, - FixedLen { a: 1, b: 0, c: 1 }, - ]; - - round_trip(items); - } - - #[derive(Debug, PartialEq, Encode, Decode)] - struct VariableLen { - a: u16, - b: Vec, - c: u32, - } - - #[test] - #[allow(clippy::zero_prefixed_literal)] - fn offset_into_fixed_bytes() { - let bytes = vec![ - // 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 - // | offset | u32 | variable - 01, 00, 09, 00, 00, 00, 01, 00, 00, 00, 00, 00, 01, 00, 02, 00, - ]; - - assert_eq!( - VariableLen::from_ssz_bytes(&bytes), - Err(DecodeError::OffsetIntoFixedPortion(9)) - ); - } - - #[test] - fn variable_len_excess_bytes() { - let variable = VariableLen { - a: 1, - b: vec![2], - c: 3, - }; - - let mut bytes = variable.as_ssz_bytes(); - bytes.append(&mut vec![0]); - - // The error message triggered is not so helpful, it's caught by a side-effect. Just - // checking there is _some_ error is fine. 
- assert!(VariableLen::from_ssz_bytes(&bytes).is_err()); - } - - #[test] - #[allow(clippy::zero_prefixed_literal)] - fn first_offset_skips_byte() { - let bytes = vec![ - // 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 - // | offset | u32 | variable - 01, 00, 11, 00, 00, 00, 01, 00, 00, 00, 00, 00, 01, 00, 02, 00, - ]; - - assert_eq!( - VariableLen::from_ssz_bytes(&bytes), - Err(DecodeError::OffsetSkipsVariableBytes(11)) - ); - } - - #[test] - #[allow(clippy::zero_prefixed_literal)] - fn variable_len_struct_encoding() { - let items: Vec = vec![ - VariableLen { - a: 0, - b: vec![], - c: 0, - }, - VariableLen { - a: 1, - b: vec![0], - c: 1, - }, - VariableLen { - a: 1, - b: vec![0, 1, 2], - c: 1, - }, - ]; - - let expected_encodings = vec![ - // 00..................................09 - // | u16--| vec offset-----| u32------------| vec payload --------| - vec![00, 00, 10, 00, 00, 00, 00, 00, 00, 00], - vec![01, 00, 10, 00, 00, 00, 01, 00, 00, 00, 00, 00], - vec![ - 01, 00, 10, 00, 00, 00, 01, 00, 00, 00, 00, 00, 01, 00, 02, 00, - ], - ]; - - for i in 0..items.len() { - assert_eq!( - items[i].as_ssz_bytes(), - expected_encodings[i], - "Failed on {}", - i - ); - } - } - - #[test] - fn vec_of_variable_len_struct() { - let items: Vec = vec![ - VariableLen { - a: 0, - b: vec![], - c: 0, - }, - VariableLen { - a: 255, - b: vec![0, 1, 2, 3], - c: 99, - }, - VariableLen { - a: 255, - b: vec![0], - c: 99, - }, - VariableLen { - a: 50, - b: vec![0], - c: 0, - }, - ]; - - round_trip(items); - } - - #[derive(Debug, PartialEq, Encode, Decode)] - struct ThreeVariableLen { - a: u16, - b: Vec, - c: Vec, - d: Vec, - } - - #[test] - fn three_variable_len() { - let vec: Vec = vec![ThreeVariableLen { - a: 42, - b: vec![0], - c: vec![1], - d: vec![2], - }]; - - round_trip(vec); - } - - #[test] - #[allow(clippy::zero_prefixed_literal)] - fn offsets_decreasing() { - let bytes = vec![ - // 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 - // | offset | offset | offset | variable - 01, 00, 14, 00, 00, 00, 15, 00, 
00, 00, 14, 00, 00, 00, 00, 00, - ]; - - assert_eq!( - ThreeVariableLen::from_ssz_bytes(&bytes), - Err(DecodeError::OffsetsAreDecreasing(14)) - ); - } - - #[test] - fn tuple_u8_u16() { - let vec: Vec<(u8, u16)> = vec![ - (0, 0), - (0, 1), - (1, 0), - (u8::max_value(), u16::max_value()), - (0, u16::max_value()), - (u8::max_value(), 0), - (42, 12301), - ]; - - round_trip(vec); - } - - #[test] - fn tuple_vec_vec() { - let vec: Vec<(u64, Vec, Vec>)> = vec![ - (0, vec![], vec![vec![]]), - (99, vec![101], vec![vec![], vec![]]), - ( - 42, - vec![12, 13, 14], - vec![vec![99, 98, 97, 96], vec![42, 44, 46, 48, 50]], - ), - ]; - - round_trip(vec); - } - - #[test] - fn btree_map_fixed() { - let data = vec![ - BTreeMap::new(), - BTreeMap::from_iter(vec![(0u8, 0u16), (1, 2), (2, 4), (4, 6)]), - ]; - round_trip(data); - } - - #[test] - fn btree_map_variable_value() { - let data = vec![ - BTreeMap::new(), - BTreeMap::from_iter(vec![ - ( - 0u64, - ThreeVariableLen { - a: 1, - b: vec![3, 5, 7], - c: vec![], - d: vec![0, 0], - }, - ), - ( - 1, - ThreeVariableLen { - a: 99, - b: vec![1], - c: vec![2, 3, 4, 5, 6, 7, 8, 9, 10], - d: vec![4, 5, 6, 7, 8], - }, - ), - ( - 2, - ThreeVariableLen { - a: 0, - b: vec![], - c: vec![], - d: vec![], - }, - ), - ]), - ]; - round_trip(data); - } -} diff --git a/consensus/ssz_derive/Cargo.toml b/consensus/ssz_derive/Cargo.toml deleted file mode 100644 index d3b2865a61d..00000000000 --- a/consensus/ssz_derive/Cargo.toml +++ /dev/null @@ -1,20 +0,0 @@ -[package] -name = "eth2_ssz_derive" -version = "0.3.1" -authors = ["Paul Hauner "] -edition = "2021" -description = "Procedural derive macros to accompany the eth2_ssz crate." 
-license = "Apache-2.0" - -[lib] -name = "ssz_derive" -proc-macro = true - -[dependencies] -syn = "1.0.42" -proc-macro2 = "1.0.23" -quote = "1.0.7" -darling = "0.13.0" - -[dev-dependencies] -eth2_ssz = "0.4.1" diff --git a/consensus/ssz_types/Cargo.toml b/consensus/ssz_types/Cargo.toml deleted file mode 100644 index 2baa8994fb8..00000000000 --- a/consensus/ssz_types/Cargo.toml +++ /dev/null @@ -1,25 +0,0 @@ -[package] -name = "eth2_ssz_types" -version = "0.2.2" -authors = ["Paul Hauner "] -edition = "2021" -description = "Provides types with unique properties required for SSZ serialization and Merklization." -license = "Apache-2.0" - -[lib] -name = "ssz_types" - -[dependencies] -tree_hash = "0.4.1" -serde = "1.0.116" -serde_derive = "1.0.116" -eth2_serde_utils = "0.1.1" -eth2_ssz = "0.4.1" -typenum = "1.12.0" -arbitrary = { version = "1.0", features = ["derive"], optional = true } -derivative = "2.1.1" -smallvec = "1.8.0" - -[dev-dependencies] -serde_json = "1.0.58" -tree_hash_derive = "0.4.0" diff --git a/consensus/ssz_types/src/bitfield.rs b/consensus/ssz_types/src/bitfield.rs deleted file mode 100644 index b7bde225786..00000000000 --- a/consensus/ssz_types/src/bitfield.rs +++ /dev/null @@ -1,1332 +0,0 @@ -use crate::tree_hash::bitfield_bytes_tree_hash_root; -use crate::Error; -use core::marker::PhantomData; -use derivative::Derivative; -use eth2_serde_utils::hex::{encode as hex_encode, PrefixedHexVisitor}; -use serde::de::{Deserialize, Deserializer}; -use serde::ser::{Serialize, Serializer}; -use smallvec::{smallvec, SmallVec, ToSmallVec}; -use ssz::{Decode, Encode}; -use tree_hash::Hash256; -use typenum::Unsigned; - -/// Maximum number of bytes to store on the stack in a bitfield's `SmallVec`. -/// -/// The default of 32 bytes is enough to take us through to ~500K validators, as the byte length of -/// attestation bitfields is roughly `N // 32 slots // 64 committes // 8 bits`. 
-pub const SMALLVEC_LEN: usize = 32; - -/// A marker trait applied to `Variable` and `Fixed` that defines the behaviour of a `Bitfield`. -pub trait BitfieldBehaviour: Clone {} - -/// A marker struct used to declare SSZ `Variable` behaviour on a `Bitfield`. -/// -/// See the [`Bitfield`](struct.Bitfield.html) docs for usage. -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Variable { - _phantom: PhantomData, -} - -/// A marker struct used to declare SSZ `Fixed` behaviour on a `Bitfield`. -/// -/// See the [`Bitfield`](struct.Bitfield.html) docs for usage. -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Fixed { - _phantom: PhantomData, -} - -impl BitfieldBehaviour for Variable {} -impl BitfieldBehaviour for Fixed {} - -/// A heap-allocated, ordered, variable-length collection of `bool` values, limited to `N` bits. -pub type BitList = Bitfield>; - -/// A heap-allocated, ordered, fixed-length collection of `bool` values, with `N` bits. -/// -/// See [Bitfield](struct.Bitfield.html) documentation. -pub type BitVector = Bitfield>; - -/// A heap-allocated, ordered, fixed-length, collection of `bool` values. Use of -/// [`BitList`](type.BitList.html) or [`BitVector`](type.BitVector.html) type aliases is preferred -/// over direct use of this struct. -/// -/// The `T` type parameter is used to define length behaviour with the `Variable` or `Fixed` marker -/// structs. -/// -/// The length of the Bitfield is set at instantiation (i.e., runtime, not compile time). However, -/// use with a `Variable` sets a type-level (i.e., compile-time) maximum length and `Fixed` -/// provides a type-level fixed length. -/// -/// ## Example -/// -/// The example uses the following crate-level type aliases: -/// -/// - `BitList` is an alias for `Bitfield>` -/// - `BitVector` is an alias for `Bitfield>` -/// -/// ``` -/// use ssz_types::{BitVector, BitList, typenum}; -/// -/// // `BitList` has a type-level maximum length. 
The length of the list is specified at runtime -/// // and it must be less than or equal to `N`. After instantiation, `BitList` cannot grow or -/// // shrink. -/// type BitList8 = BitList; -/// -/// // Creating a `BitList` with a larger-than-`N` capacity returns `None`. -/// assert!(BitList8::with_capacity(9).is_err()); -/// -/// let mut bitlist = BitList8::with_capacity(4).unwrap(); // `BitList` permits a capacity of less than the maximum. -/// assert!(bitlist.set(3, true).is_ok()); // Setting inside the instantiation capacity is permitted. -/// assert!(bitlist.set(5, true).is_err()); // Setting outside that capacity is not. -/// -/// // `BitVector` has a type-level fixed length. Unlike `BitList`, it cannot be instantiated with a custom length -/// // or grow/shrink. -/// type BitVector8 = BitVector; -/// -/// let mut bitvector = BitVector8::new(); -/// assert_eq!(bitvector.len(), 8); // `BitVector` length is fixed at the type-level. -/// assert!(bitvector.set(7, true).is_ok()); // Setting inside the capacity is permitted. -/// assert!(bitvector.set(9, true).is_err()); // Setting outside the capacity is not. -/// -/// ``` -/// -/// ## Note -/// -/// The internal representation of the bitfield is the same as that required by SSZ. The lowest -/// byte (by `Vec` index) stores the lowest bit-indices and the right-most bit stores the lowest -/// bit-index. E.g., `smallvec![0b0000_0001, 0b0000_0010]` has bits `0, 9` set. -#[derive(Clone, Debug, Derivative)] -#[derivative(PartialEq, Eq, Hash(bound = ""))] -pub struct Bitfield { - bytes: SmallVec<[u8; SMALLVEC_LEN]>, - len: usize, - _phantom: PhantomData, -} - -impl Bitfield> { - /// Instantiate with capacity for `num_bits` boolean values. The length cannot be grown or - /// shrunk after instantiation. - /// - /// All bits are initialized to `false`. - /// - /// Returns `None` if `num_bits > N`. 
- pub fn with_capacity(num_bits: usize) -> Result { - if num_bits <= N::to_usize() { - Ok(Self { - bytes: smallvec![0; bytes_for_bit_len(num_bits)], - len: num_bits, - _phantom: PhantomData, - }) - } else { - Err(Error::OutOfBounds { - i: Self::max_len(), - len: Self::max_len(), - }) - } - } - - /// Equal to `N` regardless of the value supplied to `with_capacity`. - pub fn max_len() -> usize { - N::to_usize() - } - - /// Consumes `self`, returning a serialized representation. - /// - /// The output is faithful to the SSZ encoding of `self`, such that a leading `true` bit is - /// used to indicate the length of the bitfield. - /// - /// ## Example - /// ``` - /// use ssz_types::{BitList, typenum}; - /// use smallvec::SmallVec; - /// - /// type BitList8 = BitList; - /// - /// let b = BitList8::with_capacity(4).unwrap(); - /// - /// assert_eq!(b.into_bytes(), SmallVec::from_buf([0b0001_0000])); - /// ``` - pub fn into_bytes(self) -> SmallVec<[u8; SMALLVEC_LEN]> { - let len = self.len(); - let mut bytes = self.bytes; - - bytes.resize(bytes_for_bit_len(len + 1), 0); - - let mut bitfield: Bitfield> = Bitfield::from_raw_bytes(bytes, len + 1) - .unwrap_or_else(|_| { - unreachable!( - "Bitfield with {} bytes must have enough capacity for {} bits.", - bytes_for_bit_len(len + 1), - len + 1 - ) - }); - bitfield - .set(len, true) - .expect("len must be in bounds for bitfield."); - - bitfield.bytes - } - - /// Instantiates a new instance from `bytes`. Consumes the same format that `self.into_bytes()` - /// produces (SSZ). - /// - /// Returns `None` if `bytes` are not a valid encoding. - pub fn from_bytes(bytes: SmallVec<[u8; SMALLVEC_LEN]>) -> Result { - let bytes_len = bytes.len(); - let mut initial_bitfield: Bitfield> = { - let num_bits = bytes.len() * 8; - Bitfield::from_raw_bytes(bytes, num_bits)? 
- }; - - let len = initial_bitfield - .highest_set_bit() - .ok_or(Error::MissingLengthInformation)?; - - // The length bit should be in the last byte, or else it means we have too many bytes. - if len / 8 + 1 != bytes_len { - return Err(Error::InvalidByteCount { - given: bytes_len, - expected: len / 8 + 1, - }); - } - - if len <= Self::max_len() { - initial_bitfield - .set(len, false) - .expect("Bit has been confirmed to exist"); - - let mut bytes = initial_bitfield.into_raw_bytes(); - - bytes.truncate(bytes_for_bit_len(len)); - - Self::from_raw_bytes(bytes, len) - } else { - Err(Error::OutOfBounds { - i: Self::max_len(), - len: Self::max_len(), - }) - } - } - - /// Compute the intersection of two BitLists of potentially different lengths. - /// - /// Return a new BitList with length equal to the shorter of the two inputs. - pub fn intersection(&self, other: &Self) -> Self { - let min_len = std::cmp::min(self.len(), other.len()); - let mut result = Self::with_capacity(min_len).expect("min len always less than N"); - // Bitwise-and the bytes together, starting from the left of each vector. This takes care - // of masking out any entries beyond `min_len` as well, assuming the bitfield doesn't - // contain any set bits beyond its length. - for i in 0..result.bytes.len() { - result.bytes[i] = self.bytes[i] & other.bytes[i]; - } - result - } - - /// Compute the union of two BitLists of potentially different lengths. - /// - /// Return a new BitList with length equal to the longer of the two inputs. - pub fn union(&self, other: &Self) -> Self { - let max_len = std::cmp::max(self.len(), other.len()); - let mut result = Self::with_capacity(max_len).expect("max len always less than N"); - for i in 0..result.bytes.len() { - result.bytes[i] = - self.bytes.get(i).copied().unwrap_or(0) | other.bytes.get(i).copied().unwrap_or(0); - } - result - } -} - -impl Bitfield> { - /// Instantiate a new `Bitfield` with a fixed-length of `N` bits. 
- /// - /// All bits are initialized to `false`. - pub fn new() -> Self { - Self { - bytes: smallvec![0; bytes_for_bit_len(Self::capacity())], - len: Self::capacity(), - _phantom: PhantomData, - } - } - - /// Returns `N`, the number of bits in `Self`. - pub fn capacity() -> usize { - N::to_usize() - } - - /// Consumes `self`, returning a serialized representation. - /// - /// The output is faithful to the SSZ encoding of `self`. - /// - /// ## Example - /// ``` - /// use ssz_types::{BitVector, typenum}; - /// use smallvec::SmallVec; - /// - /// type BitVector4 = BitVector; - /// - /// assert_eq!(BitVector4::new().into_bytes(), SmallVec::from_buf([0b0000_0000])); - /// ``` - pub fn into_bytes(self) -> SmallVec<[u8; SMALLVEC_LEN]> { - self.into_raw_bytes() - } - - /// Instantiates a new instance from `bytes`. Consumes the same format that `self.into_bytes()` - /// produces (SSZ). - /// - /// Returns `None` if `bytes` are not a valid encoding. - pub fn from_bytes(bytes: SmallVec<[u8; SMALLVEC_LEN]>) -> Result { - Self::from_raw_bytes(bytes, Self::capacity()) - } - - /// Compute the intersection of two fixed-length `Bitfield`s. - /// - /// Return a new fixed-length `Bitfield`. - pub fn intersection(&self, other: &Self) -> Self { - let mut result = Self::new(); - // Bitwise-and the bytes together, starting from the left of each vector. This takes care - // of masking out any entries beyond `min_len` as well, assuming the bitfield doesn't - // contain any set bits beyond its length. - for i in 0..result.bytes.len() { - result.bytes[i] = self.bytes[i] & other.bytes[i]; - } - result - } - - /// Compute the union of two fixed-length `Bitfield`s. - /// - /// Return a new fixed-length `Bitfield`. 
- pub fn union(&self, other: &Self) -> Self { - let mut result = Self::new(); - for i in 0..result.bytes.len() { - result.bytes[i] = - self.bytes.get(i).copied().unwrap_or(0) | other.bytes.get(i).copied().unwrap_or(0); - } - result - } -} - -impl Default for Bitfield> { - fn default() -> Self { - Self::new() - } -} - -impl Bitfield { - /// Sets the `i`'th bit to `value`. - /// - /// Returns `None` if `i` is out-of-bounds of `self`. - pub fn set(&mut self, i: usize, value: bool) -> Result<(), Error> { - let len = self.len; - - if i < len { - let byte = self - .bytes - .get_mut(i / 8) - .ok_or(Error::OutOfBounds { i, len })?; - - if value { - *byte |= 1 << (i % 8) - } else { - *byte &= !(1 << (i % 8)) - } - - Ok(()) - } else { - Err(Error::OutOfBounds { i, len: self.len }) - } - } - - /// Returns the value of the `i`'th bit. - /// - /// Returns `Error` if `i` is out-of-bounds of `self`. - pub fn get(&self, i: usize) -> Result { - if i < self.len { - let byte = self - .bytes - .get(i / 8) - .ok_or(Error::OutOfBounds { i, len: self.len })?; - - Ok(*byte & 1 << (i % 8) > 0) - } else { - Err(Error::OutOfBounds { i, len: self.len }) - } - } - - /// Returns the number of bits stored in `self`. - pub fn len(&self) -> usize { - self.len - } - - /// Returns `true` if `self.len() == 0`. - pub fn is_empty(&self) -> bool { - self.len == 0 - } - - /// Returns the underlying bytes representation of the bitfield. - pub fn into_raw_bytes(self) -> SmallVec<[u8; SMALLVEC_LEN]> { - self.bytes - } - - /// Returns a view into the underlying bytes representation of the bitfield. - pub fn as_slice(&self) -> &[u8] { - &self.bytes - } - - /// Instantiates from the given `bytes`, which are the same format as output from - /// `self.into_raw_bytes()`. - /// - /// Returns `None` if: - /// - /// - `bytes` is not the minimal required bytes to represent a bitfield of `bit_len` bits. 
- /// - `bit_len` is not a multiple of 8 and `bytes` contains set bits that are higher than, or - /// equal to `bit_len`. - fn from_raw_bytes(bytes: SmallVec<[u8; SMALLVEC_LEN]>, bit_len: usize) -> Result { - if bit_len == 0 { - if bytes.len() == 1 && bytes[0] == 0 { - // A bitfield with `bit_len` 0 can only be represented by a single zero byte. - Ok(Self { - bytes, - len: 0, - _phantom: PhantomData, - }) - } else { - Err(Error::ExcessBits) - } - } else if bytes.len() != bytes_for_bit_len(bit_len) { - // The number of bytes must be the minimum required to represent `bit_len`. - Err(Error::InvalidByteCount { - given: bytes.len(), - expected: bytes_for_bit_len(bit_len), - }) - } else { - // Ensure there are no bits higher than `bit_len` that are set to true. - let (mask, _) = u8::max_value().overflowing_shr(8 - (bit_len as u32 % 8)); - - if (bytes.last().expect("Guarded against empty bytes") & !mask) == 0 { - Ok(Self { - bytes, - len: bit_len, - _phantom: PhantomData, - }) - } else { - Err(Error::ExcessBits) - } - } - } - - /// Returns the `Some(i)` where `i` is the highest index with a set bit. Returns `None` if - /// there are no set bits. - pub fn highest_set_bit(&self) -> Option { - self.bytes - .iter() - .enumerate() - .rev() - .find(|(_, byte)| **byte > 0) - .map(|(i, byte)| i * 8 + 7 - byte.leading_zeros() as usize) - } - - /// Returns an iterator across bitfield `bool` values, starting at the lowest index. - pub fn iter(&self) -> BitIter<'_, T> { - BitIter { - bitfield: self, - i: 0, - } - } - - /// Returns true if no bits are set. - pub fn is_zero(&self) -> bool { - self.bytes.iter().all(|byte| *byte == 0) - } - - /// Returns the number of bits that are set to `true`. - pub fn num_set_bits(&self) -> usize { - self.bytes - .iter() - .map(|byte| byte.count_ones() as usize) - .sum() - } - - /// Compute the difference of this Bitfield and another of potentially different length. 
- pub fn difference(&self, other: &Self) -> Self { - let mut result = self.clone(); - result.difference_inplace(other); - result - } - - /// Compute the difference of this Bitfield and another of potentially different length. - pub fn difference_inplace(&mut self, other: &Self) { - let min_byte_len = std::cmp::min(self.bytes.len(), other.bytes.len()); - - for i in 0..min_byte_len { - self.bytes[i] &= !other.bytes[i]; - } - } - - /// Shift the bits to higher indices, filling the lower indices with zeroes. - /// - /// The amount to shift by, `n`, must be less than or equal to `self.len()`. - pub fn shift_up(&mut self, n: usize) -> Result<(), Error> { - if n <= self.len() { - // Shift the bits up (starting from the high indices to avoid overwriting) - for i in (n..self.len()).rev() { - self.set(i, self.get(i - n)?)?; - } - // Zero the low bits - for i in 0..n { - self.set(i, false).unwrap(); - } - Ok(()) - } else { - Err(Error::OutOfBounds { - i: n, - len: self.len(), - }) - } - } -} - -/// Returns the minimum required bytes to represent a given number of bits. -/// -/// `bit_len == 0` requires a single byte. -fn bytes_for_bit_len(bit_len: usize) -> usize { - std::cmp::max(1, (bit_len + 7) / 8) -} - -/// An iterator over the bits in a `Bitfield`. -pub struct BitIter<'a, T> { - bitfield: &'a Bitfield, - i: usize, -} - -impl<'a, T: BitfieldBehaviour> Iterator for BitIter<'a, T> { - type Item = bool; - - fn next(&mut self) -> Option { - let res = self.bitfield.get(self.i).ok()?; - self.i += 1; - Some(res) - } -} - -impl Encode for Bitfield> { - fn is_ssz_fixed_len() -> bool { - false - } - - fn ssz_bytes_len(&self) -> usize { - // We could likely do better than turning this into bytes and reading the length, however - // it is kept this way for simplicity. 
- self.clone().into_bytes().len() - } - - fn ssz_append(&self, buf: &mut Vec) { - buf.extend_from_slice(&self.clone().into_bytes()) - } -} - -impl Decode for Bitfield> { - fn is_ssz_fixed_len() -> bool { - false - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - Self::from_bytes(bytes.to_smallvec()).map_err(|e| { - ssz::DecodeError::BytesInvalid(format!("BitList failed to decode: {:?}", e)) - }) - } -} - -impl Encode for Bitfield> { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_bytes_len(&self) -> usize { - self.as_slice().len() - } - - fn ssz_fixed_len() -> usize { - bytes_for_bit_len(N::to_usize()) - } - - fn ssz_append(&self, buf: &mut Vec) { - buf.extend_from_slice(&self.clone().into_bytes()) - } -} - -impl Decode for Bitfield> { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - bytes_for_bit_len(N::to_usize()) - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - Self::from_bytes(bytes.to_smallvec()).map_err(|e| { - ssz::DecodeError::BytesInvalid(format!("BitVector failed to decode: {:?}", e)) - }) - } -} - -impl Serialize for Bitfield> { - /// Serde serialization is compliant with the Ethereum YAML test format. - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&hex_encode(self.as_ssz_bytes())) - } -} - -impl<'de, N: Unsigned + Clone> Deserialize<'de> for Bitfield> { - /// Serde serialization is compliant with the Ethereum YAML test format. - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let bytes = deserializer.deserialize_str(PrefixedHexVisitor)?; - Self::from_ssz_bytes(&bytes) - .map_err(|e| serde::de::Error::custom(format!("Bitfield {:?}", e))) - } -} - -impl Serialize for Bitfield> { - /// Serde serialization is compliant with the Ethereum YAML test format. 
- fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&hex_encode(self.as_ssz_bytes())) - } -} - -impl<'de, N: Unsigned + Clone> Deserialize<'de> for Bitfield> { - /// Serde serialization is compliant with the Ethereum YAML test format. - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let bytes = deserializer.deserialize_str(PrefixedHexVisitor)?; - Self::from_ssz_bytes(&bytes) - .map_err(|e| serde::de::Error::custom(format!("Bitfield {:?}", e))) - } -} - -impl tree_hash::TreeHash for Bitfield> { - fn tree_hash_type() -> tree_hash::TreeHashType { - tree_hash::TreeHashType::List - } - - fn tree_hash_packed_encoding(&self) -> tree_hash::PackedEncoding { - unreachable!("List should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("List should never be packed.") - } - - fn tree_hash_root(&self) -> Hash256 { - // Note: we use `as_slice` because it does _not_ have the length-delimiting bit set (or - // present). 
- let root = bitfield_bytes_tree_hash_root::(self.as_slice()); - tree_hash::mix_in_length(&root, self.len()) - } -} - -impl tree_hash::TreeHash for Bitfield> { - fn tree_hash_type() -> tree_hash::TreeHashType { - tree_hash::TreeHashType::Vector - } - - fn tree_hash_packed_encoding(&self) -> tree_hash::PackedEncoding { - unreachable!("Vector should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("Vector should never be packed.") - } - - fn tree_hash_root(&self) -> Hash256 { - bitfield_bytes_tree_hash_root::(self.as_slice()) - } -} - -#[cfg(feature = "arbitrary")] -impl arbitrary::Arbitrary<'_> for Bitfield> { - fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { - let size = N::to_usize(); - let mut vec = smallvec![0u8; size]; - u.fill_buffer(&mut vec)?; - Self::from_bytes(vec).map_err(|_| arbitrary::Error::IncorrectFormat) - } -} - -#[cfg(feature = "arbitrary")] -impl arbitrary::Arbitrary<'_> for Bitfield> { - fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { - let max_size = N::to_usize(); - let rand = usize::arbitrary(u)?; - let size = std::cmp::min(rand, max_size); - let mut vec = smallvec![0u8; size]; - u.fill_buffer(&mut vec)?; - Self::from_bytes(vec).map_err(|_| arbitrary::Error::IncorrectFormat) - } -} - -#[cfg(test)] -mod bitvector { - use super::*; - use crate::BitVector; - - pub type BitVector0 = BitVector; - pub type BitVector1 = BitVector; - pub type BitVector4 = BitVector; - pub type BitVector8 = BitVector; - pub type BitVector16 = BitVector; - pub type BitVector64 = BitVector; - - #[test] - fn ssz_encode() { - assert_eq!(BitVector0::new().as_ssz_bytes(), vec![0b0000_0000]); - assert_eq!(BitVector1::new().as_ssz_bytes(), vec![0b0000_0000]); - assert_eq!(BitVector4::new().as_ssz_bytes(), vec![0b0000_0000]); - assert_eq!(BitVector8::new().as_ssz_bytes(), vec![0b0000_0000]); - assert_eq!( - BitVector16::new().as_ssz_bytes(), - vec![0b0000_0000, 0b0000_0000] - ); - - let mut b = 
BitVector8::new(); - for i in 0..8 { - b.set(i, true).unwrap(); - } - assert_eq!(b.as_ssz_bytes(), vec![255]); - - let mut b = BitVector4::new(); - for i in 0..4 { - b.set(i, true).unwrap(); - } - assert_eq!(b.as_ssz_bytes(), vec![0b0000_1111]); - } - - #[test] - fn ssz_decode() { - assert!(BitVector0::from_ssz_bytes(&[0b0000_0000]).is_ok()); - assert!(BitVector0::from_ssz_bytes(&[0b0000_0001]).is_err()); - assert!(BitVector0::from_ssz_bytes(&[0b0000_0010]).is_err()); - - assert!(BitVector1::from_ssz_bytes(&[0b0000_0001]).is_ok()); - assert!(BitVector1::from_ssz_bytes(&[0b0000_0010]).is_err()); - assert!(BitVector1::from_ssz_bytes(&[0b0000_0100]).is_err()); - assert!(BitVector1::from_ssz_bytes(&[0b0000_0000, 0b0000_0000]).is_err()); - - assert!(BitVector8::from_ssz_bytes(&[0b0000_0000]).is_ok()); - assert!(BitVector8::from_ssz_bytes(&[1, 0b0000_0000]).is_err()); - assert!(BitVector8::from_ssz_bytes(&[0b0000_0000, 1]).is_err()); - assert!(BitVector8::from_ssz_bytes(&[0b0000_0001]).is_ok()); - assert!(BitVector8::from_ssz_bytes(&[0b0000_0010]).is_ok()); - assert!(BitVector8::from_ssz_bytes(&[0b0000_0100, 0b0000_0001]).is_err()); - assert!(BitVector8::from_ssz_bytes(&[0b0000_0100, 0b0000_0010]).is_err()); - assert!(BitVector8::from_ssz_bytes(&[0b0000_0100, 0b0000_0100]).is_err()); - - assert!(BitVector16::from_ssz_bytes(&[0b0000_0000]).is_err()); - assert!(BitVector16::from_ssz_bytes(&[0b0000_0000, 0b0000_0000]).is_ok()); - assert!(BitVector16::from_ssz_bytes(&[1, 0b0000_0000, 0b0000_0000]).is_err()); - } - - #[test] - fn intersection() { - let a = BitVector16::from_raw_bytes(smallvec![0b1100, 0b0001], 16).unwrap(); - let b = BitVector16::from_raw_bytes(smallvec![0b1011, 0b1001], 16).unwrap(); - let c = BitVector16::from_raw_bytes(smallvec![0b1000, 0b0001], 16).unwrap(); - - assert_eq!(a.intersection(&b), c); - assert_eq!(b.intersection(&a), c); - assert_eq!(a.intersection(&c), c); - assert_eq!(b.intersection(&c), c); - assert_eq!(a.intersection(&a), a); - 
assert_eq!(b.intersection(&b), b); - assert_eq!(c.intersection(&c), c); - } - - #[test] - fn intersection_diff_length() { - let a = BitVector16::from_bytes(smallvec![0b0010_1110, 0b0010_1011]).unwrap(); - let b = BitVector16::from_bytes(smallvec![0b0010_1101, 0b0000_0001]).unwrap(); - let c = BitVector16::from_bytes(smallvec![0b0010_1100, 0b0000_0001]).unwrap(); - - assert_eq!(a.len(), 16); - assert_eq!(b.len(), 16); - assert_eq!(c.len(), 16); - assert_eq!(a.intersection(&b), c); - assert_eq!(b.intersection(&a), c); - } - - #[test] - fn union() { - let a = BitVector16::from_raw_bytes(smallvec![0b1100, 0b0001], 16).unwrap(); - let b = BitVector16::from_raw_bytes(smallvec![0b1011, 0b1001], 16).unwrap(); - let c = BitVector16::from_raw_bytes(smallvec![0b1111, 0b1001], 16).unwrap(); - - assert_eq!(a.union(&b), c); - assert_eq!(b.union(&a), c); - assert_eq!(a.union(&a), a); - assert_eq!(b.union(&b), b); - assert_eq!(c.union(&c), c); - } - - #[test] - fn union_diff_length() { - let a = BitVector16::from_bytes(smallvec![0b0010_1011, 0b0010_1110]).unwrap(); - let b = BitVector16::from_bytes(smallvec![0b0000_0001, 0b0010_1101]).unwrap(); - let c = BitVector16::from_bytes(smallvec![0b0010_1011, 0b0010_1111]).unwrap(); - - assert_eq!(a.len(), c.len()); - assert_eq!(a.union(&b), c); - assert_eq!(b.union(&a), c); - } - - #[test] - fn ssz_round_trip() { - assert_round_trip(BitVector0::new()); - - let mut b = BitVector1::new(); - b.set(0, true).unwrap(); - assert_round_trip(b); - - let mut b = BitVector8::new(); - for j in 0..8 { - if j % 2 == 0 { - b.set(j, true).unwrap(); - } - } - assert_round_trip(b); - - let mut b = BitVector8::new(); - for j in 0..8 { - b.set(j, true).unwrap(); - } - assert_round_trip(b); - - let mut b = BitVector16::new(); - for j in 0..16 { - if j % 2 == 0 { - b.set(j, true).unwrap(); - } - } - assert_round_trip(b); - - let mut b = BitVector16::new(); - for j in 0..16 { - b.set(j, true).unwrap(); - } - assert_round_trip(b); - } - - fn assert_round_trip(t: 
T) { - assert_eq!(T::from_ssz_bytes(&t.as_ssz_bytes()).unwrap(), t); - } - - #[test] - fn ssz_bytes_len() { - for i in 0..64 { - let mut bitfield = BitVector64::new(); - for j in 0..i { - bitfield.set(j, true).expect("should set bit in bounds"); - } - let bytes = bitfield.as_ssz_bytes(); - assert_eq!(bitfield.ssz_bytes_len(), bytes.len(), "i = {}", i); - } - } - - #[test] - fn excess_bits_nimbus() { - let bad = vec![0b0001_1111]; - - assert!(BitVector4::from_ssz_bytes(&bad).is_err()); - } - - // Ensure that stack size of a BitVector is manageable. - #[test] - fn size_of() { - assert_eq!(std::mem::size_of::(), SMALLVEC_LEN + 24); - } -} - -#[cfg(test)] -#[allow(clippy::cognitive_complexity)] -mod bitlist { - use super::*; - use crate::BitList; - - pub type BitList0 = BitList; - pub type BitList1 = BitList; - pub type BitList8 = BitList; - pub type BitList16 = BitList; - pub type BitList1024 = BitList; - - #[test] - fn ssz_encode() { - assert_eq!( - BitList0::with_capacity(0).unwrap().as_ssz_bytes(), - vec![0b0000_0001], - ); - - assert_eq!( - BitList1::with_capacity(0).unwrap().as_ssz_bytes(), - vec![0b0000_0001], - ); - - assert_eq!( - BitList1::with_capacity(1).unwrap().as_ssz_bytes(), - vec![0b0000_0010], - ); - - assert_eq!( - BitList8::with_capacity(8).unwrap().as_ssz_bytes(), - vec![0b0000_0000, 0b0000_0001], - ); - - assert_eq!( - BitList8::with_capacity(7).unwrap().as_ssz_bytes(), - vec![0b1000_0000] - ); - - let mut b = BitList8::with_capacity(8).unwrap(); - for i in 0..8 { - b.set(i, true).unwrap(); - } - assert_eq!(b.as_ssz_bytes(), vec![255, 0b0000_0001]); - - let mut b = BitList8::with_capacity(8).unwrap(); - for i in 0..4 { - b.set(i, true).unwrap(); - } - assert_eq!(b.as_ssz_bytes(), vec![0b0000_1111, 0b0000_0001]); - - assert_eq!( - BitList16::with_capacity(16).unwrap().as_ssz_bytes(), - vec![0b0000_0000, 0b0000_0000, 0b0000_0001] - ); - } - - #[test] - fn ssz_decode() { - assert!(BitList0::from_ssz_bytes(&[]).is_err()); - 
assert!(BitList1::from_ssz_bytes(&[]).is_err()); - assert!(BitList8::from_ssz_bytes(&[]).is_err()); - assert!(BitList16::from_ssz_bytes(&[]).is_err()); - - assert!(BitList0::from_ssz_bytes(&[0b0000_0000]).is_err()); - assert!(BitList1::from_ssz_bytes(&[0b0000_0000, 0b0000_0000]).is_err()); - assert!(BitList8::from_ssz_bytes(&[0b0000_0000]).is_err()); - assert!(BitList16::from_ssz_bytes(&[0b0000_0000]).is_err()); - - assert!(BitList0::from_ssz_bytes(&[0b0000_0001]).is_ok()); - assert!(BitList0::from_ssz_bytes(&[0b0000_0010]).is_err()); - - assert!(BitList1::from_ssz_bytes(&[0b0000_0001]).is_ok()); - assert!(BitList1::from_ssz_bytes(&[0b0000_0010]).is_ok()); - assert!(BitList1::from_ssz_bytes(&[0b0000_0100]).is_err()); - - assert!(BitList8::from_ssz_bytes(&[0b0000_0001]).is_ok()); - assert!(BitList8::from_ssz_bytes(&[0b0000_0010]).is_ok()); - assert!(BitList8::from_ssz_bytes(&[0b0000_0001, 0b0000_0001]).is_ok()); - assert!(BitList8::from_ssz_bytes(&[0b0000_0001, 0b0000_0010]).is_err()); - assert!(BitList8::from_ssz_bytes(&[0b0000_0001, 0b0000_0100]).is_err()); - } - - #[test] - fn ssz_decode_extra_bytes() { - assert!(BitList0::from_ssz_bytes(&[0b0000_0001, 0b0000_0000]).is_err()); - assert!(BitList1::from_ssz_bytes(&[0b0000_0001, 0b0000_0000]).is_err()); - assert!(BitList8::from_ssz_bytes(&[0b0000_0001, 0b0000_0000]).is_err()); - assert!(BitList16::from_ssz_bytes(&[0b0000_0001, 0b0000_0000]).is_err()); - assert!(BitList1024::from_ssz_bytes(&[0b1000_0000, 0]).is_err()); - assert!(BitList1024::from_ssz_bytes(&[0b1000_0000, 0, 0]).is_err()); - assert!(BitList1024::from_ssz_bytes(&[0b1000_0000, 0, 0, 0, 0]).is_err()); - } - - #[test] - fn ssz_round_trip() { - assert_round_trip(BitList0::with_capacity(0).unwrap()); - - for i in 0..2 { - assert_round_trip(BitList1::with_capacity(i).unwrap()); - } - for i in 0..9 { - assert_round_trip(BitList8::with_capacity(i).unwrap()); - } - for i in 0..17 { - assert_round_trip(BitList16::with_capacity(i).unwrap()); - } - - let mut b = 
BitList1::with_capacity(1).unwrap(); - b.set(0, true).unwrap(); - assert_round_trip(b); - - for i in 0..8 { - let mut b = BitList8::with_capacity(i).unwrap(); - for j in 0..i { - if j % 2 == 0 { - b.set(j, true).unwrap(); - } - } - assert_round_trip(b); - - let mut b = BitList8::with_capacity(i).unwrap(); - for j in 0..i { - b.set(j, true).unwrap(); - } - assert_round_trip(b); - } - - for i in 0..16 { - let mut b = BitList16::with_capacity(i).unwrap(); - for j in 0..i { - if j % 2 == 0 { - b.set(j, true).unwrap(); - } - } - assert_round_trip(b); - - let mut b = BitList16::with_capacity(i).unwrap(); - for j in 0..i { - b.set(j, true).unwrap(); - } - assert_round_trip(b); - } - } - - fn assert_round_trip(t: T) { - assert_eq!(T::from_ssz_bytes(&t.as_ssz_bytes()).unwrap(), t); - } - - #[test] - fn from_raw_bytes() { - assert!(BitList1024::from_raw_bytes(smallvec![0b0000_0000], 0).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0000_0001], 1).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0000_0011], 2).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0000_0111], 3).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0000_1111], 4).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0001_1111], 5).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0011_1111], 6).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0111_1111], 7).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111], 8).is_ok()); - - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0000_0001], 9).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0000_0011], 10).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0000_0111], 11).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0000_1111], 12).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0001_1111], 13).is_ok()); - 
assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0011_1111], 14).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0111_1111], 15).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b1111_1111], 16).is_ok()); - - for i in 0..8 { - assert!(BitList1024::from_raw_bytes(smallvec![], i).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111], i).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0000_0000, 0b1111_1110], i).is_err()); - } - - assert!(BitList1024::from_raw_bytes(smallvec![0b0000_0001], 0).is_err()); - - assert!(BitList1024::from_raw_bytes(smallvec![0b0000_0001], 0).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0000_0011], 1).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0000_0111], 2).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0000_1111], 3).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0001_1111], 4).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0011_1111], 5).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0111_1111], 6).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111], 7).is_err()); - - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0000_0001], 8).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0000_0011], 9).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0000_0111], 10).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0000_1111], 11).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0001_1111], 12).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0011_1111], 13).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0111_1111], 14).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b1111_1111], 15).is_err()); - } - - fn test_set_unset(num_bits: usize) { - let mut bitfield = 
BitList1024::with_capacity(num_bits).unwrap(); - - for i in 0..=num_bits { - if i < num_bits { - // Starts as false - assert_eq!(bitfield.get(i), Ok(false)); - // Can be set true. - assert!(bitfield.set(i, true).is_ok()); - assert_eq!(bitfield.get(i), Ok(true)); - // Can be set false - assert!(bitfield.set(i, false).is_ok()); - assert_eq!(bitfield.get(i), Ok(false)); - } else { - assert!(bitfield.get(i).is_err()); - assert!(bitfield.set(i, true).is_err()); - assert!(bitfield.get(i).is_err()); - } - } - } - - fn test_bytes_round_trip(num_bits: usize) { - for i in 0..num_bits { - let mut bitfield = BitList1024::with_capacity(num_bits).unwrap(); - bitfield.set(i, true).unwrap(); - - let bytes = bitfield.clone().into_raw_bytes(); - assert_eq!(bitfield, Bitfield::from_raw_bytes(bytes, num_bits).unwrap()); - } - } - - #[test] - fn set_unset() { - for i in 0..8 * 5 { - test_set_unset(i) - } - } - - #[test] - fn bytes_round_trip() { - for i in 0..8 * 5 { - test_bytes_round_trip(i) - } - } - - /// Type-specialised `smallvec` macro for testing. - macro_rules! 
bytevec { - ($($x : expr),* $(,)*) => { - { - let __smallvec: SmallVec<[u8; SMALLVEC_LEN]> = smallvec!($($x),*); - __smallvec - } - }; - } - - #[test] - fn into_raw_bytes() { - let mut bitfield = BitList1024::with_capacity(9).unwrap(); - bitfield.set(0, true).unwrap(); - assert_eq!( - bitfield.clone().into_raw_bytes(), - bytevec![0b0000_0001, 0b0000_0000] - ); - bitfield.set(1, true).unwrap(); - assert_eq!( - bitfield.clone().into_raw_bytes(), - bytevec![0b0000_0011, 0b0000_0000] - ); - bitfield.set(2, true).unwrap(); - assert_eq!( - bitfield.clone().into_raw_bytes(), - bytevec![0b0000_0111, 0b0000_0000] - ); - bitfield.set(3, true).unwrap(); - assert_eq!( - bitfield.clone().into_raw_bytes(), - bytevec![0b0000_1111, 0b0000_0000] - ); - bitfield.set(4, true).unwrap(); - assert_eq!( - bitfield.clone().into_raw_bytes(), - bytevec![0b0001_1111, 0b0000_0000] - ); - bitfield.set(5, true).unwrap(); - assert_eq!( - bitfield.clone().into_raw_bytes(), - bytevec![0b0011_1111, 0b0000_0000] - ); - bitfield.set(6, true).unwrap(); - assert_eq!( - bitfield.clone().into_raw_bytes(), - bytevec![0b0111_1111, 0b0000_0000] - ); - bitfield.set(7, true).unwrap(); - assert_eq!( - bitfield.clone().into_raw_bytes(), - bytevec![0b1111_1111, 0b0000_0000] - ); - bitfield.set(8, true).unwrap(); - assert_eq!( - bitfield.into_raw_bytes(), - bytevec![0b1111_1111, 0b0000_0001] - ); - } - - #[test] - fn highest_set_bit() { - assert_eq!( - BitList1024::with_capacity(16).unwrap().highest_set_bit(), - None - ); - - assert_eq!( - BitList1024::from_raw_bytes(smallvec![0b0000_0001, 0b0000_0000], 16) - .unwrap() - .highest_set_bit(), - Some(0) - ); - - assert_eq!( - BitList1024::from_raw_bytes(smallvec![0b0000_0010, 0b0000_0000], 16) - .unwrap() - .highest_set_bit(), - Some(1) - ); - - assert_eq!( - BitList1024::from_raw_bytes(smallvec![0b0000_1000], 8) - .unwrap() - .highest_set_bit(), - Some(3) - ); - - assert_eq!( - BitList1024::from_raw_bytes(smallvec![0b0000_0000, 0b1000_0000], 16) - .unwrap() - 
.highest_set_bit(), - Some(15) - ); - } - - #[test] - fn intersection() { - let a = BitList1024::from_raw_bytes(smallvec![0b1100, 0b0001], 16).unwrap(); - let b = BitList1024::from_raw_bytes(smallvec![0b1011, 0b1001], 16).unwrap(); - let c = BitList1024::from_raw_bytes(smallvec![0b1000, 0b0001], 16).unwrap(); - - assert_eq!(a.intersection(&b), c); - assert_eq!(b.intersection(&a), c); - assert_eq!(a.intersection(&c), c); - assert_eq!(b.intersection(&c), c); - assert_eq!(a.intersection(&a), a); - assert_eq!(b.intersection(&b), b); - assert_eq!(c.intersection(&c), c); - } - - #[test] - fn intersection_diff_length() { - let a = BitList1024::from_bytes(smallvec![0b0010_1110, 0b0010_1011]).unwrap(); - let b = BitList1024::from_bytes(smallvec![0b0010_1101, 0b0000_0001]).unwrap(); - let c = BitList1024::from_bytes(smallvec![0b0010_1100, 0b0000_0001]).unwrap(); - let d = BitList1024::from_bytes(smallvec![0b0010_1110, 0b1111_1111, 0b1111_1111]).unwrap(); - - assert_eq!(a.len(), 13); - assert_eq!(b.len(), 8); - assert_eq!(c.len(), 8); - assert_eq!(d.len(), 23); - assert_eq!(a.intersection(&b), c); - assert_eq!(b.intersection(&a), c); - assert_eq!(a.intersection(&d), a); - assert_eq!(d.intersection(&a), a); - } - - #[test] - fn union() { - let a = BitList1024::from_raw_bytes(smallvec![0b1100, 0b0001], 16).unwrap(); - let b = BitList1024::from_raw_bytes(smallvec![0b1011, 0b1001], 16).unwrap(); - let c = BitList1024::from_raw_bytes(smallvec![0b1111, 0b1001], 16).unwrap(); - - assert_eq!(a.union(&b), c); - assert_eq!(b.union(&a), c); - assert_eq!(a.union(&a), a); - assert_eq!(b.union(&b), b); - assert_eq!(c.union(&c), c); - } - - #[test] - fn union_diff_length() { - let a = BitList1024::from_bytes(smallvec![0b0010_1011, 0b0010_1110]).unwrap(); - let b = BitList1024::from_bytes(smallvec![0b0000_0001, 0b0010_1101]).unwrap(); - let c = BitList1024::from_bytes(smallvec![0b0010_1011, 0b0010_1111]).unwrap(); - let d = BitList1024::from_bytes(smallvec![0b0010_1011, 0b1011_1110, 
0b1000_1101]).unwrap(); - - assert_eq!(a.len(), c.len()); - assert_eq!(a.union(&b), c); - assert_eq!(b.union(&a), c); - assert_eq!(a.union(&d), d); - assert_eq!(d.union(&a), d); - } - - #[test] - fn difference() { - let a = BitList1024::from_raw_bytes(smallvec![0b1100, 0b0001], 16).unwrap(); - let b = BitList1024::from_raw_bytes(smallvec![0b1011, 0b1001], 16).unwrap(); - let a_b = BitList1024::from_raw_bytes(smallvec![0b0100, 0b0000], 16).unwrap(); - let b_a = BitList1024::from_raw_bytes(smallvec![0b0011, 0b1000], 16).unwrap(); - - assert_eq!(a.difference(&b), a_b); - assert_eq!(b.difference(&a), b_a); - assert!(a.difference(&a).is_zero()); - } - - #[test] - fn difference_diff_length() { - let a = BitList1024::from_raw_bytes(smallvec![0b0110, 0b1100, 0b0011], 24).unwrap(); - let b = BitList1024::from_raw_bytes(smallvec![0b1011, 0b1001], 16).unwrap(); - let a_b = BitList1024::from_raw_bytes(smallvec![0b0100, 0b0100, 0b0011], 24).unwrap(); - let b_a = BitList1024::from_raw_bytes(smallvec![0b1001, 0b0001], 16).unwrap(); - - assert_eq!(a.difference(&b), a_b); - assert_eq!(b.difference(&a), b_a); - } - - #[test] - fn shift_up() { - let mut a = BitList1024::from_raw_bytes(smallvec![0b1100_1111, 0b1101_0110], 16).unwrap(); - let mut b = BitList1024::from_raw_bytes(smallvec![0b1001_1110, 0b1010_1101], 16).unwrap(); - - a.shift_up(1).unwrap(); - assert_eq!(a, b); - a.shift_up(15).unwrap(); - assert!(a.is_zero()); - - b.shift_up(16).unwrap(); - assert!(b.is_zero()); - assert!(b.shift_up(17).is_err()); - } - - #[test] - fn num_set_bits() { - let a = BitList1024::from_raw_bytes(smallvec![0b1100, 0b0001], 16).unwrap(); - let b = BitList1024::from_raw_bytes(smallvec![0b1011, 0b1001], 16).unwrap(); - - assert_eq!(a.num_set_bits(), 3); - assert_eq!(b.num_set_bits(), 5); - } - - #[test] - fn iter() { - let mut bitfield = BitList1024::with_capacity(9).unwrap(); - bitfield.set(2, true).unwrap(); - bitfield.set(8, true).unwrap(); - - assert_eq!( - bitfield.iter().collect::>(), - 
vec![false, false, true, false, false, false, false, false, true] - ); - } - - #[test] - fn ssz_bytes_len() { - for i in 1..64 { - let mut bitfield = BitList1024::with_capacity(i).unwrap(); - for j in 0..i { - bitfield.set(j, true).expect("should set bit in bounds"); - } - let bytes = bitfield.as_ssz_bytes(); - assert_eq!(bitfield.ssz_bytes_len(), bytes.len(), "i = {}", i); - } - } - - // Ensure that the stack size of a BitList is manageable. - #[test] - fn size_of() { - assert_eq!(std::mem::size_of::(), SMALLVEC_LEN + 24); - } -} diff --git a/consensus/ssz_types/src/fixed_vector.rs b/consensus/ssz_types/src/fixed_vector.rs deleted file mode 100644 index 9625f27f3ab..00000000000 --- a/consensus/ssz_types/src/fixed_vector.rs +++ /dev/null @@ -1,446 +0,0 @@ -use crate::tree_hash::vec_tree_hash_root; -use crate::Error; -use derivative::Derivative; -use serde_derive::{Deserialize, Serialize}; -use std::marker::PhantomData; -use std::ops::{Deref, DerefMut, Index, IndexMut}; -use std::slice::SliceIndex; -use tree_hash::Hash256; -use typenum::Unsigned; - -pub use typenum; - -/// Emulates a SSZ `Vector` (distinct from a Rust `Vec`). -/// -/// An ordered, heap-allocated, fixed-length, homogeneous collection of `T`, with `N` values. -/// -/// This struct is backed by a Rust `Vec` but constrained such that it must be instantiated with a -/// fixed number of elements and you may not add or remove elements, only modify. -/// -/// The length of this struct is fixed at the type-level using -/// [typenum](https://crates.io/crates/typenum). -/// -/// ## Note -/// -/// Whilst it is possible with this library, SSZ declares that a `FixedVector` with a length of `0` -/// is illegal. -/// -/// ## Example -/// -/// ``` -/// use ssz_types::{FixedVector, typenum}; -/// -/// let base: Vec = vec![1, 2, 3, 4]; -/// -/// // Create a `FixedVector` from a `Vec` that has the expected length. 
-/// let exact: FixedVector<_, typenum::U4> = FixedVector::from(base.clone()); -/// assert_eq!(&exact[..], &[1, 2, 3, 4]); -/// -/// // Create a `FixedVector` from a `Vec` that is too long and the `Vec` is truncated. -/// let short: FixedVector<_, typenum::U3> = FixedVector::from(base.clone()); -/// assert_eq!(&short[..], &[1, 2, 3]); -/// -/// // Create a `FixedVector` from a `Vec` that is too short and the missing values are created -/// // using `std::default::Default`. -/// let long: FixedVector<_, typenum::U5> = FixedVector::from(base); -/// assert_eq!(&long[..], &[1, 2, 3, 4, 0]); -/// ``` -#[derive(Debug, Clone, Serialize, Deserialize, Derivative)] -#[derivative(PartialEq, Hash(bound = "T: std::hash::Hash"))] -#[serde(transparent)] -pub struct FixedVector { - vec: Vec, - _phantom: PhantomData, -} - -impl FixedVector { - /// Returns `Ok` if the given `vec` equals the fixed length of `Self`. Otherwise returns - /// `Err`. - pub fn new(vec: Vec) -> Result { - if vec.len() == Self::capacity() { - Ok(Self { - vec, - _phantom: PhantomData, - }) - } else { - Err(Error::OutOfBounds { - i: vec.len(), - len: Self::capacity(), - }) - } - } - - /// Create a new vector filled with clones of `elem`. - pub fn from_elem(elem: T) -> Self - where - T: Clone, - { - Self { - vec: vec![elem; N::to_usize()], - _phantom: PhantomData, - } - } - - /// Identical to `self.capacity`, returns the type-level constant length. - /// - /// Exists for compatibility with `Vec`. - pub fn len(&self) -> usize { - self.vec.len() - } - - /// True if the type-level constant length of `self` is zero. - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// Returns the type-level constant length. 
- pub fn capacity() -> usize { - N::to_usize() - } -} - -impl From> for FixedVector { - fn from(mut vec: Vec) -> Self { - vec.resize_with(Self::capacity(), Default::default); - - Self { - vec, - _phantom: PhantomData, - } - } -} - -impl From> for Vec { - fn from(vector: FixedVector) -> Vec { - vector.vec - } -} - -impl Default for FixedVector { - fn default() -> Self { - Self { - vec: (0..N::to_usize()).map(|_| T::default()).collect(), - _phantom: PhantomData, - } - } -} - -impl> Index for FixedVector { - type Output = I::Output; - - #[inline] - fn index(&self, index: I) -> &Self::Output { - Index::index(&self.vec, index) - } -} - -impl> IndexMut for FixedVector { - #[inline] - fn index_mut(&mut self, index: I) -> &mut Self::Output { - IndexMut::index_mut(&mut self.vec, index) - } -} - -impl Deref for FixedVector { - type Target = [T]; - - fn deref(&self) -> &[T] { - &self.vec[..] - } -} - -// This implementation is required to use `get_mut` to access elements. -// -// It's safe because none of the methods on mutable slices allow changing the length -// of the backing vec. -impl DerefMut for FixedVector { - fn deref_mut(&mut self) -> &mut [T] { - &mut self.vec[..] 
- } -} - -impl tree_hash::TreeHash for FixedVector -where - T: tree_hash::TreeHash, -{ - fn tree_hash_type() -> tree_hash::TreeHashType { - tree_hash::TreeHashType::Vector - } - - fn tree_hash_packed_encoding(&self) -> tree_hash::PackedEncoding { - unreachable!("Vector should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("Vector should never be packed.") - } - - fn tree_hash_root(&self) -> Hash256 { - vec_tree_hash_root::(&self.vec) - } -} - -impl ssz::Encode for FixedVector -where - T: ssz::Encode, -{ - fn is_ssz_fixed_len() -> bool { - T::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - if ::is_ssz_fixed_len() { - T::ssz_fixed_len() * N::to_usize() - } else { - ssz::BYTES_PER_LENGTH_OFFSET - } - } - - fn ssz_bytes_len(&self) -> usize { - self.vec.ssz_bytes_len() - } - - fn ssz_append(&self, buf: &mut Vec) { - if T::is_ssz_fixed_len() { - buf.reserve(T::ssz_fixed_len() * self.len()); - - for item in &self.vec { - item.ssz_append(buf); - } - } else { - let mut encoder = - ssz::SszEncoder::container(buf, self.len() * ssz::BYTES_PER_LENGTH_OFFSET); - - for item in &self.vec { - encoder.append(item); - } - - encoder.finalize(); - } - } -} - -impl ssz::Decode for FixedVector -where - T: ssz::Decode, -{ - fn is_ssz_fixed_len() -> bool { - T::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - if ::is_ssz_fixed_len() { - T::ssz_fixed_len() * N::to_usize() - } else { - ssz::BYTES_PER_LENGTH_OFFSET - } - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - let fixed_len = N::to_usize(); - - if bytes.is_empty() { - Err(ssz::DecodeError::InvalidByteLength { - len: 0, - expected: 1, - }) - } else if T::is_ssz_fixed_len() { - let num_items = bytes - .len() - .checked_div(T::ssz_fixed_len()) - .ok_or(ssz::DecodeError::ZeroLengthItem)?; - - if num_items != fixed_len { - return Err(ssz::DecodeError::BytesInvalid(format!( - "FixedVector of {} items has {} items", - num_items, fixed_len - ))); - } - - bytes - 
.chunks(T::ssz_fixed_len()) - .map(|chunk| T::from_ssz_bytes(chunk)) - .collect::, _>>() - .and_then(|vec| { - Self::new(vec).map_err(|e| { - ssz::DecodeError::BytesInvalid(format!( - "Wrong number of FixedVector elements: {:?}", - e - )) - }) - }) - } else { - let vec = ssz::decode_list_of_variable_length_items(bytes, Some(fixed_len))?; - Self::new(vec).map_err(|e| { - ssz::DecodeError::BytesInvalid(format!( - "Wrong number of FixedVector elements: {:?}", - e - )) - }) - } - } -} - -#[cfg(feature = "arbitrary")] -impl<'a, T: arbitrary::Arbitrary<'a>, N: 'static + Unsigned> arbitrary::Arbitrary<'a> - for FixedVector -{ - fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { - let size = N::to_usize(); - let mut vec: Vec = Vec::with_capacity(size); - for _ in 0..size { - vec.push(::arbitrary(u)?); - } - Self::new(vec).map_err(|_| arbitrary::Error::IncorrectFormat) - } -} - -#[cfg(test)] -mod test { - use super::*; - use ssz::*; - use tree_hash::{merkle_root, TreeHash}; - use tree_hash_derive::TreeHash; - use typenum::*; - - #[test] - fn new() { - let vec = vec![42; 5]; - let fixed: Result, _> = FixedVector::new(vec); - assert!(fixed.is_err()); - - let vec = vec![42; 3]; - let fixed: Result, _> = FixedVector::new(vec); - assert!(fixed.is_err()); - - let vec = vec![42; 4]; - let fixed: Result, _> = FixedVector::new(vec); - assert!(fixed.is_ok()); - } - - #[test] - fn indexing() { - let vec = vec![1, 2]; - - let mut fixed: FixedVector = vec.clone().into(); - - assert_eq!(fixed[0], 1); - assert_eq!(&fixed[0..1], &vec[0..1]); - assert_eq!((fixed[..]).len(), 8192); - - fixed[1] = 3; - assert_eq!(fixed[1], 3); - } - - #[test] - fn length() { - let vec = vec![42; 5]; - let fixed: FixedVector = FixedVector::from(vec.clone()); - assert_eq!(&fixed[..], &vec[0..4]); - - let vec = vec![42; 3]; - let fixed: FixedVector = FixedVector::from(vec.clone()); - assert_eq!(&fixed[0..3], &vec[..]); - assert_eq!(&fixed[..], &vec![42, 42, 42, 0][..]); - - let vec = 
vec![]; - let fixed: FixedVector = FixedVector::from(vec); - assert_eq!(&fixed[..], &vec![0, 0, 0, 0][..]); - } - - #[test] - fn deref() { - let vec = vec![0, 2, 4, 6]; - let fixed: FixedVector = FixedVector::from(vec); - - assert_eq!(fixed.first(), Some(&0)); - assert_eq!(fixed.get(3), Some(&6)); - assert_eq!(fixed.get(4), None); - } - - #[test] - fn ssz_encode() { - let vec: FixedVector = vec![0; 2].into(); - assert_eq!(vec.as_ssz_bytes(), vec![0, 0, 0, 0]); - assert_eq!( as Encode>::ssz_fixed_len(), 4); - } - - fn ssz_round_trip(item: T) { - let encoded = &item.as_ssz_bytes(); - assert_eq!(item.ssz_bytes_len(), encoded.len()); - assert_eq!(T::from_ssz_bytes(encoded), Ok(item)); - } - - #[test] - fn ssz_round_trip_u16_len_8() { - ssz_round_trip::>(vec![42; 8].into()); - ssz_round_trip::>(vec![0; 8].into()); - } - - #[test] - fn tree_hash_u8() { - let fixed: FixedVector = FixedVector::from(vec![]); - assert_eq!(fixed.tree_hash_root(), merkle_root(&[0; 8], 0)); - - let fixed: FixedVector = FixedVector::from(vec![0; 1]); - assert_eq!(fixed.tree_hash_root(), merkle_root(&[0; 8], 0)); - - let fixed: FixedVector = FixedVector::from(vec![0; 8]); - assert_eq!(fixed.tree_hash_root(), merkle_root(&[0; 8], 0)); - - let fixed: FixedVector = FixedVector::from(vec![42; 16]); - assert_eq!(fixed.tree_hash_root(), merkle_root(&[42; 16], 0)); - - let source: Vec = (0..16).collect(); - let fixed: FixedVector = FixedVector::from(source.clone()); - assert_eq!(fixed.tree_hash_root(), merkle_root(&source, 0)); - } - - #[derive(Clone, Copy, TreeHash, Default)] - struct A { - a: u32, - b: u32, - } - - fn repeat(input: &[u8], n: usize) -> Vec { - let mut output = vec![]; - - for _ in 0..n { - output.append(&mut input.to_vec()); - } - - output - } - - #[test] - fn tree_hash_composite() { - let a = A { a: 0, b: 1 }; - - let fixed: FixedVector = FixedVector::from(vec![]); - assert_eq!(fixed.tree_hash_root(), merkle_root(&[0; 32], 0)); - - let fixed: FixedVector = FixedVector::from(vec![a]); 
- assert_eq!( - fixed.tree_hash_root(), - merkle_root(a.tree_hash_root().as_bytes(), 0) - ); - - let fixed: FixedVector = FixedVector::from(vec![a; 8]); - assert_eq!( - fixed.tree_hash_root(), - merkle_root(&repeat(a.tree_hash_root().as_bytes(), 8), 0) - ); - - let fixed: FixedVector = FixedVector::from(vec![a; 13]); - assert_eq!( - fixed.tree_hash_root(), - merkle_root(&repeat(a.tree_hash_root().as_bytes(), 13), 0) - ); - - let fixed: FixedVector = FixedVector::from(vec![a; 16]); - assert_eq!( - fixed.tree_hash_root(), - merkle_root(&repeat(a.tree_hash_root().as_bytes(), 16), 0) - ); - } -} diff --git a/consensus/ssz_types/src/lib.rs b/consensus/ssz_types/src/lib.rs deleted file mode 100644 index 3e181da8cb3..00000000000 --- a/consensus/ssz_types/src/lib.rs +++ /dev/null @@ -1,72 +0,0 @@ -//! Provides types with unique properties required for SSZ serialization and Merklization: -//! -//! - `FixedVector`: A heap-allocated list with a size that is fixed at compile time. -//! - `VariableList`: A heap-allocated list that cannot grow past a type-level maximum length. -//! - `BitList`: A heap-allocated bitfield that with a type-level _maximum_ length. -//! - `BitVector`: A heap-allocated bitfield that with a type-level _fixed__ length. -//! -//! These structs are required as SSZ serialization and Merklization rely upon type-level lengths -//! for padding and verification. -//! -//! Adheres to the Ethereum 2.0 [SSZ -//! specification](https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/ssz/simple-serialize.md) -//! at v0.12.1. -//! -//! ## Example -//! ``` -//! use ssz_types::*; -//! -//! pub struct Example { -//! bit_vector: BitVector, -//! bit_list: BitList, -//! variable_list: VariableList, -//! fixed_vector: FixedVector, -//! } -//! -//! let mut example = Example { -//! bit_vector: Bitfield::new(), -//! bit_list: Bitfield::with_capacity(4).unwrap(), -//! variable_list: <_>::from(vec![0, 1]), -//! fixed_vector: <_>::from(vec![2, 3]), -//! }; -//! -//! 
assert_eq!(example.bit_vector.len(), 8); -//! assert_eq!(example.bit_list.len(), 4); -//! assert_eq!(&example.variable_list[..], &[0, 1]); -//! assert_eq!(&example.fixed_vector[..], &[2, 3, 0, 0, 0, 0, 0, 0]); -//! -//! ``` - -#[macro_use] -mod bitfield; -mod fixed_vector; -pub mod serde_utils; -mod tree_hash; -mod variable_list; - -pub use bitfield::{BitList, BitVector, Bitfield}; -pub use fixed_vector::FixedVector; -pub use typenum; -pub use variable_list::VariableList; - -pub mod length { - pub use crate::bitfield::{Fixed, Variable}; -} - -/// Returned when an item encounters an error. -#[derive(PartialEq, Debug, Clone)] -pub enum Error { - OutOfBounds { - i: usize, - len: usize, - }, - /// A `BitList` does not have a set bit, therefore it's length is unknowable. - MissingLengthInformation, - /// A `BitList` has excess bits set to true. - ExcessBits, - /// A `BitList` has an invalid number of bytes for a given bit length. - InvalidByteCount { - given: usize, - expected: usize, - }, -} diff --git a/consensus/ssz_types/src/serde_utils/hex_fixed_vec.rs b/consensus/ssz_types/src/serde_utils/hex_fixed_vec.rs deleted file mode 100644 index 86077891bcd..00000000000 --- a/consensus/ssz_types/src/serde_utils/hex_fixed_vec.rs +++ /dev/null @@ -1,22 +0,0 @@ -use crate::FixedVector; -use eth2_serde_utils::hex::{self, PrefixedHexVisitor}; -use serde::{Deserializer, Serializer}; -use typenum::Unsigned; - -pub fn serialize(bytes: &FixedVector, serializer: S) -> Result -where - S: Serializer, - U: Unsigned, -{ - serializer.serialize_str(&hex::encode(&bytes[..])) -} - -pub fn deserialize<'de, D, U>(deserializer: D) -> Result, D::Error> -where - D: Deserializer<'de>, - U: Unsigned, -{ - let vec = deserializer.deserialize_string(PrefixedHexVisitor)?; - FixedVector::new(vec) - .map_err(|e| serde::de::Error::custom(format!("invalid fixed vector: {:?}", e))) -} diff --git a/consensus/ssz_types/src/serde_utils/hex_var_list.rs b/consensus/ssz_types/src/serde_utils/hex_var_list.rs 
deleted file mode 100644 index e3a3a14e06c..00000000000 --- a/consensus/ssz_types/src/serde_utils/hex_var_list.rs +++ /dev/null @@ -1,23 +0,0 @@ -//! Serialize `VariableList` as 0x-prefixed hex string. -use crate::VariableList; -use eth2_serde_utils::hex::{self, PrefixedHexVisitor}; -use serde::{Deserializer, Serializer}; -use typenum::Unsigned; - -pub fn serialize(bytes: &VariableList, serializer: S) -> Result -where - S: Serializer, - N: Unsigned, -{ - serializer.serialize_str(&hex::encode(&**bytes)) -} - -pub fn deserialize<'de, D, N>(deserializer: D) -> Result, D::Error> -where - D: Deserializer<'de>, - N: Unsigned, -{ - let bytes = deserializer.deserialize_str(PrefixedHexVisitor)?; - VariableList::new(bytes) - .map_err(|e| serde::de::Error::custom(format!("invalid variable list: {:?}", e))) -} diff --git a/consensus/ssz_types/src/serde_utils/list_of_hex_var_list.rs b/consensus/ssz_types/src/serde_utils/list_of_hex_var_list.rs deleted file mode 100644 index e2fd8ddf320..00000000000 --- a/consensus/ssz_types/src/serde_utils/list_of_hex_var_list.rs +++ /dev/null @@ -1,77 +0,0 @@ -//! Serialize `VaraibleList, N>` as list of 0x-prefixed hex string. 
-use crate::VariableList; -use serde::{ser::SerializeSeq, Deserialize, Deserializer, Serialize, Serializer}; -use std::marker::PhantomData; -use typenum::Unsigned; - -#[derive(Deserialize)] -#[serde(transparent)] -pub struct WrappedListOwned( - #[serde(with = "crate::serde_utils::hex_var_list")] VariableList, -); - -#[derive(Serialize)] -#[serde(transparent)] -pub struct WrappedListRef<'a, N: Unsigned>( - #[serde(with = "crate::serde_utils::hex_var_list")] &'a VariableList, -); - -pub fn serialize( - list: &VariableList, N>, - serializer: S, -) -> Result -where - S: Serializer, - M: Unsigned, - N: Unsigned, -{ - let mut seq = serializer.serialize_seq(Some(list.len()))?; - for bytes in list { - seq.serialize_element(&WrappedListRef(bytes))?; - } - seq.end() -} - -#[derive(Default)] -pub struct Visitor { - _phantom_m: PhantomData, - _phantom_n: PhantomData, -} - -impl<'a, M, N> serde::de::Visitor<'a> for Visitor -where - M: Unsigned, - N: Unsigned, -{ - type Value = VariableList, N>; - - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(formatter, "a list of 0x-prefixed hex bytes") - } - - fn visit_seq(self, mut seq: A) -> Result - where - A: serde::de::SeqAccess<'a>, - { - let mut list: VariableList, N> = <_>::default(); - - while let Some(val) = seq.next_element::>()? { - list.push(val.0).map_err(|e| { - serde::de::Error::custom(format!("failed to push value to list: {:?}.", e)) - })?; - } - - Ok(list) - } -} - -pub fn deserialize<'de, D, M, N>( - deserializer: D, -) -> Result, N>, D::Error> -where - D: Deserializer<'de>, - M: Unsigned, - N: Unsigned, -{ - deserializer.deserialize_seq(Visitor::default()) -} diff --git a/consensus/ssz_types/src/serde_utils/quoted_u64_fixed_vec.rs b/consensus/ssz_types/src/serde_utils/quoted_u64_fixed_vec.rs deleted file mode 100644 index 0eb265adc31..00000000000 --- a/consensus/ssz_types/src/serde_utils/quoted_u64_fixed_vec.rs +++ /dev/null @@ -1,113 +0,0 @@ -//! 
Formats `FixedVector` using quotes. -//! -//! E.g., `FixedVector::from(vec![0, 1, 2])` serializes as `["0", "1", "2"]`. -//! -//! Quotes can be optional during decoding. If `N` does not equal the length deserialization will fail. - -use crate::serde_utils::quoted_u64_var_list::deserialize_max; -use crate::FixedVector; -use eth2_serde_utils::quoted_u64_vec::QuotedIntWrapper; -use serde::ser::SerializeSeq; -use serde::{Deserializer, Serializer}; -use std::marker::PhantomData; -use typenum::Unsigned; - -pub struct QuotedIntFixedVecVisitor { - _phantom: PhantomData, -} - -impl<'a, N> serde::de::Visitor<'a> for QuotedIntFixedVecVisitor -where - N: Unsigned, -{ - type Value = FixedVector; - - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(formatter, "a list of quoted or unquoted integers") - } - - fn visit_seq(self, seq: A) -> Result - where - A: serde::de::SeqAccess<'a>, - { - let vec = deserialize_max(seq, N::to_usize())?; - let fix: FixedVector = FixedVector::new(vec) - .map_err(|e| serde::de::Error::custom(format!("FixedVector: {:?}", e)))?; - Ok(fix) - } -} - -pub fn serialize(value: &[u64], serializer: S) -> Result -where - S: Serializer, -{ - let mut seq = serializer.serialize_seq(Some(value.len()))?; - for &int in value { - seq.serialize_element(&QuotedIntWrapper { int })?; - } - seq.end() -} - -pub fn deserialize<'de, D, N>(deserializer: D) -> Result, D::Error> -where - D: Deserializer<'de>, - N: Unsigned, -{ - deserializer.deserialize_any(QuotedIntFixedVecVisitor { - _phantom: PhantomData, - }) -} - -#[cfg(test)] -mod test { - use super::*; - use serde_derive::{Deserialize, Serialize}; - use typenum::U4; - - #[derive(Debug, Serialize, Deserialize)] - struct Obj { - #[serde(with = "crate::serde_utils::quoted_u64_fixed_vec")] - values: FixedVector, - } - - #[test] - fn quoted_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": ["1", "2", "3", "4"] }"#).unwrap(); - let expected: FixedVector = 
FixedVector::from(vec![1, 2, 3, 4]); - assert_eq!(obj.values, expected); - } - - #[test] - fn unquoted_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": [1, 2, 3, 4] }"#).unwrap(); - let expected: FixedVector = FixedVector::from(vec![1, 2, 3, 4]); - assert_eq!(obj.values, expected); - } - - #[test] - fn mixed_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": ["1", 2, "3", "4"] }"#).unwrap(); - let expected: FixedVector = FixedVector::from(vec![1, 2, 3, 4]); - assert_eq!(obj.values, expected); - } - - #[test] - fn empty_list_err() { - serde_json::from_str::(r#"{ "values": [] }"#).unwrap_err(); - } - - #[test] - fn short_list_err() { - serde_json::from_str::(r#"{ "values": [1, 2] }"#).unwrap_err(); - } - - #[test] - fn long_list_err() { - serde_json::from_str::(r#"{ "values": [1, 2, 3, 4, 5] }"#).unwrap_err(); - } - - #[test] - fn whole_list_quoted_err() { - serde_json::from_str::(r#"{ "values": "[1, 2, 3, 4]" }"#).unwrap_err(); - } -} diff --git a/consensus/ssz_types/src/serde_utils/quoted_u64_var_list.rs b/consensus/ssz_types/src/serde_utils/quoted_u64_var_list.rs deleted file mode 100644 index 9e176b63593..00000000000 --- a/consensus/ssz_types/src/serde_utils/quoted_u64_var_list.rs +++ /dev/null @@ -1,139 +0,0 @@ -//! Formats `VariableList` using quotes. -//! -//! E.g., `VariableList::from(vec![0, 1, 2])` serializes as `["0", "1", "2"]`. -//! -//! Quotes can be optional during decoding. If the length of the `Vec` is greater than `N`, deserialization fails. 
- -use crate::VariableList; -use eth2_serde_utils::quoted_u64_vec::QuotedIntWrapper; -use serde::ser::SerializeSeq; -use serde::{Deserializer, Serializer}; -use std::marker::PhantomData; -use typenum::Unsigned; - -pub struct QuotedIntVarListVisitor { - _phantom: PhantomData, -} - -impl<'a, N> serde::de::Visitor<'a> for QuotedIntVarListVisitor -where - N: Unsigned, -{ - type Value = VariableList; - - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(formatter, "a list of quoted or unquoted integers") - } - - fn visit_seq(self, seq: A) -> Result - where - A: serde::de::SeqAccess<'a>, - { - let vec = deserialize_max(seq, N::to_usize())?; - let list: VariableList = VariableList::new(vec) - .map_err(|e| serde::de::Error::custom(format!("VariableList: {:?}", e)))?; - Ok(list) - } -} - -pub fn serialize(value: &[u64], serializer: S) -> Result -where - S: Serializer, -{ - let mut seq = serializer.serialize_seq(Some(value.len()))?; - for &int in value { - seq.serialize_element(&QuotedIntWrapper { int })?; - } - seq.end() -} - -pub fn deserialize<'de, D, N>(deserializer: D) -> Result, D::Error> -where - D: Deserializer<'de>, - N: Unsigned, -{ - deserializer.deserialize_any(QuotedIntVarListVisitor { - _phantom: PhantomData, - }) -} - -/// Returns a `Vec` of no more than `max_items` length. -pub(crate) fn deserialize_max<'a, A>(mut seq: A, max_items: usize) -> Result, A::Error> -where - A: serde::de::SeqAccess<'a>, -{ - let mut vec = vec![]; - let mut counter = 0; - - while let Some(val) = seq.next_element()? { - let val: QuotedIntWrapper = val; - counter += 1; - if counter > max_items { - return Err(serde::de::Error::custom(format!( - "Deserialization failed. 
Length cannot be greater than {}.", - max_items - ))); - } - - vec.push(val.int); - } - - Ok(vec) -} - -#[cfg(test)] -mod test { - use super::*; - use serde_derive::{Deserialize, Serialize}; - use typenum::U4; - - #[derive(Debug, Serialize, Deserialize)] - struct Obj { - #[serde(with = "crate::serde_utils::quoted_u64_var_list")] - values: VariableList, - } - - #[test] - fn quoted_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": ["1", "2", "3", "4"] }"#).unwrap(); - let expected: VariableList = VariableList::from(vec![1, 2, 3, 4]); - assert_eq!(obj.values, expected); - } - - #[test] - fn unquoted_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": [1, 2, 3, 4] }"#).unwrap(); - let expected: VariableList = VariableList::from(vec![1, 2, 3, 4]); - assert_eq!(obj.values, expected); - } - - #[test] - fn mixed_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": ["1", 2, "3", "4"] }"#).unwrap(); - let expected: VariableList = VariableList::from(vec![1, 2, 3, 4]); - assert_eq!(obj.values, expected); - } - - #[test] - fn empty_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": [] }"#).unwrap(); - assert!(obj.values.is_empty()); - } - - #[test] - fn short_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": [1, 2] }"#).unwrap(); - let expected: VariableList = VariableList::from(vec![1, 2]); - assert_eq!(obj.values, expected); - } - - #[test] - fn long_list_err() { - serde_json::from_str::(r#"{ "values": [1, 2, 3, 4, 5] }"#).unwrap_err(); - } - - #[test] - fn whole_list_quoted_err() { - serde_json::from_str::(r#"{ "values": "[1, 2, 3, 4]" }"#).unwrap_err(); - } -} diff --git a/consensus/ssz_types/src/tree_hash.rs b/consensus/ssz_types/src/tree_hash.rs deleted file mode 100644 index e08c1d62fb1..00000000000 --- a/consensus/ssz_types/src/tree_hash.rs +++ /dev/null @@ -1,58 +0,0 @@ -use tree_hash::{Hash256, MerkleHasher, TreeHash, TreeHashType, BYTES_PER_CHUNK}; -use typenum::Unsigned; - 
-/// A helper function providing common functionality between the `TreeHash` implementations for -/// `FixedVector` and `VariableList`. -pub fn vec_tree_hash_root(vec: &[T]) -> Hash256 -where - T: TreeHash, - N: Unsigned, -{ - match T::tree_hash_type() { - TreeHashType::Basic => { - let mut hasher = MerkleHasher::with_leaves( - (N::to_usize() + T::tree_hash_packing_factor() - 1) / T::tree_hash_packing_factor(), - ); - - for item in vec { - hasher - .write(&item.tree_hash_packed_encoding()) - .expect("ssz_types variable vec should not contain more elements than max"); - } - - hasher - .finish() - .expect("ssz_types variable vec should not have a remaining buffer") - } - TreeHashType::Container | TreeHashType::List | TreeHashType::Vector => { - let mut hasher = MerkleHasher::with_leaves(N::to_usize()); - - for item in vec { - hasher - .write(item.tree_hash_root().as_bytes()) - .expect("ssz_types vec should not contain more elements than max"); - } - - hasher - .finish() - .expect("ssz_types vec should not have a remaining buffer") - } - } -} - -/// A helper function providing common functionality for finding the Merkle root of some bytes that -/// represent a bitfield. 
-pub fn bitfield_bytes_tree_hash_root(bytes: &[u8]) -> Hash256 { - let byte_size = (N::to_usize() + 7) / 8; - let leaf_count = (byte_size + BYTES_PER_CHUNK - 1) / BYTES_PER_CHUNK; - - let mut hasher = MerkleHasher::with_leaves(leaf_count); - - hasher - .write(bytes) - .expect("bitfield should not exceed tree hash leaf limit"); - - hasher - .finish() - .expect("bitfield tree hash buffer should not exceed leaf limit") -} diff --git a/consensus/ssz_types/src/variable_list.rs b/consensus/ssz_types/src/variable_list.rs deleted file mode 100644 index 3361f750908..00000000000 --- a/consensus/ssz_types/src/variable_list.rs +++ /dev/null @@ -1,477 +0,0 @@ -use crate::tree_hash::vec_tree_hash_root; -use crate::Error; -use derivative::Derivative; -use serde_derive::{Deserialize, Serialize}; -use std::marker::PhantomData; -use std::ops::{Deref, DerefMut, Index, IndexMut}; -use std::slice::SliceIndex; -use tree_hash::Hash256; -use typenum::Unsigned; - -pub use typenum; - -/// Emulates a SSZ `List`. -/// -/// An ordered, heap-allocated, variable-length, homogeneous collection of `T`, with no more than -/// `N` values. -/// -/// This struct is backed by a Rust `Vec` but constrained such that it must be instantiated with a -/// fixed number of elements and you may not add or remove elements, only modify. -/// -/// The length of this struct is fixed at the type-level using -/// [typenum](https://crates.io/crates/typenum). -/// -/// ## Example -/// -/// ``` -/// use ssz_types::{VariableList, typenum}; -/// -/// let base: Vec = vec![1, 2, 3, 4]; -/// -/// // Create a `VariableList` from a `Vec` that has the expected length. -/// let exact: VariableList<_, typenum::U4> = VariableList::from(base.clone()); -/// assert_eq!(&exact[..], &[1, 2, 3, 4]); -/// -/// // Create a `VariableList` from a `Vec` that is too long and the `Vec` is truncated. 
-/// let short: VariableList<_, typenum::U3> = VariableList::from(base.clone()); -/// assert_eq!(&short[..], &[1, 2, 3]); -/// -/// // Create a `VariableList` from a `Vec` that is shorter than the maximum. -/// let mut long: VariableList<_, typenum::U5> = VariableList::from(base); -/// assert_eq!(&long[..], &[1, 2, 3, 4]); -/// -/// // Push a value to if it does not exceed the maximum -/// long.push(5).unwrap(); -/// assert_eq!(&long[..], &[1, 2, 3, 4, 5]); -/// -/// // Push a value to if it _does_ exceed the maximum. -/// assert!(long.push(6).is_err()); -/// ``` -#[derive(Debug, Clone, Serialize, Deserialize, Derivative)] -#[derivative(PartialEq, Eq, Hash(bound = "T: std::hash::Hash"))] -#[serde(transparent)] -pub struct VariableList { - vec: Vec, - _phantom: PhantomData, -} - -impl VariableList { - /// Returns `Some` if the given `vec` equals the fixed length of `Self`. Otherwise returns - /// `None`. - pub fn new(vec: Vec) -> Result { - if vec.len() <= N::to_usize() { - Ok(Self { - vec, - _phantom: PhantomData, - }) - } else { - Err(Error::OutOfBounds { - i: vec.len(), - len: Self::max_len(), - }) - } - } - - /// Create an empty list. - pub fn empty() -> Self { - Self { - vec: vec![], - _phantom: PhantomData, - } - } - - /// Returns the number of values presently in `self`. - pub fn len(&self) -> usize { - self.vec.len() - } - - /// True if `self` does not contain any values. - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// Returns the type-level maximum length. - pub fn max_len() -> usize { - N::to_usize() - } - - /// Appends `value` to the back of `self`. - /// - /// Returns `Err(())` when appending `value` would exceed the maximum length. 
- pub fn push(&mut self, value: T) -> Result<(), Error> { - if self.vec.len() < Self::max_len() { - self.vec.push(value); - Ok(()) - } else { - Err(Error::OutOfBounds { - i: self.vec.len() + 1, - len: Self::max_len(), - }) - } - } -} - -impl From> for VariableList { - fn from(mut vec: Vec) -> Self { - vec.truncate(N::to_usize()); - - Self { - vec, - _phantom: PhantomData, - } - } -} - -impl From> for Vec { - fn from(list: VariableList) -> Vec { - list.vec - } -} - -impl Default for VariableList { - fn default() -> Self { - Self { - vec: Vec::default(), - _phantom: PhantomData, - } - } -} - -impl> Index for VariableList { - type Output = I::Output; - - #[inline] - fn index(&self, index: I) -> &Self::Output { - Index::index(&self.vec, index) - } -} - -impl> IndexMut for VariableList { - #[inline] - fn index_mut(&mut self, index: I) -> &mut Self::Output { - IndexMut::index_mut(&mut self.vec, index) - } -} - -impl Deref for VariableList { - type Target = [T]; - - fn deref(&self) -> &[T] { - &self.vec[..] - } -} - -impl DerefMut for VariableList { - fn deref_mut(&mut self) -> &mut [T] { - &mut self.vec[..] 
- } -} - -impl<'a, T, N: Unsigned> IntoIterator for &'a VariableList { - type Item = &'a T; - type IntoIter = std::slice::Iter<'a, T>; - - fn into_iter(self) -> Self::IntoIter { - self.iter() - } -} - -impl IntoIterator for VariableList { - type Item = T; - type IntoIter = std::vec::IntoIter; - - fn into_iter(self) -> Self::IntoIter { - self.vec.into_iter() - } -} - -impl tree_hash::TreeHash for VariableList -where - T: tree_hash::TreeHash, -{ - fn tree_hash_type() -> tree_hash::TreeHashType { - tree_hash::TreeHashType::List - } - - fn tree_hash_packed_encoding(&self) -> tree_hash::PackedEncoding { - unreachable!("List should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("List should never be packed.") - } - - fn tree_hash_root(&self) -> Hash256 { - let root = vec_tree_hash_root::(&self.vec); - - tree_hash::mix_in_length(&root, self.len()) - } -} - -impl ssz::Encode for VariableList -where - T: ssz::Encode, -{ - fn is_ssz_fixed_len() -> bool { - >::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - >::ssz_fixed_len() - } - - fn ssz_bytes_len(&self) -> usize { - self.vec.ssz_bytes_len() - } - - fn ssz_append(&self, buf: &mut Vec) { - self.vec.ssz_append(buf) - } -} - -impl ssz::Decode for VariableList -where - T: ssz::Decode, - N: Unsigned, -{ - fn is_ssz_fixed_len() -> bool { - false - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - let max_len = N::to_usize(); - - if bytes.is_empty() { - Ok(vec![].into()) - } else if T::is_ssz_fixed_len() { - let num_items = bytes - .len() - .checked_div(T::ssz_fixed_len()) - .ok_or(ssz::DecodeError::ZeroLengthItem)?; - - if num_items > max_len { - return Err(ssz::DecodeError::BytesInvalid(format!( - "VariableList of {} items exceeds maximum of {}", - num_items, max_len - ))); - } - - bytes - .chunks(T::ssz_fixed_len()) - .try_fold(Vec::with_capacity(num_items), |mut vec, chunk| { - vec.push(T::from_ssz_bytes(chunk)?); - Ok(vec) - }) - .map(Into::into) - } else { - 
ssz::decode_list_of_variable_length_items(bytes, Some(max_len)) - .map(|vec: Vec<_>| vec.into()) - } - } -} - -#[cfg(feature = "arbitrary")] -impl<'a, T: arbitrary::Arbitrary<'a>, N: 'static + Unsigned> arbitrary::Arbitrary<'a> - for VariableList -{ - fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { - let max_size = N::to_usize(); - let rand = usize::arbitrary(u)?; - let size = std::cmp::min(rand, max_size); - let mut vec: Vec = Vec::with_capacity(size); - for _ in 0..size { - vec.push(::arbitrary(u)?); - } - Self::new(vec).map_err(|_| arbitrary::Error::IncorrectFormat) - } -} - -#[cfg(test)] -mod test { - use super::*; - use ssz::*; - use tree_hash::{merkle_root, TreeHash}; - use tree_hash_derive::TreeHash; - use typenum::*; - - #[test] - fn new() { - let vec = vec![42; 5]; - let fixed: Result, _> = VariableList::new(vec); - assert!(fixed.is_err()); - - let vec = vec![42; 3]; - let fixed: Result, _> = VariableList::new(vec); - assert!(fixed.is_ok()); - - let vec = vec![42; 4]; - let fixed: Result, _> = VariableList::new(vec); - assert!(fixed.is_ok()); - } - - #[test] - fn indexing() { - let vec = vec![1, 2]; - - let mut fixed: VariableList = vec.clone().into(); - - assert_eq!(fixed[0], 1); - assert_eq!(&fixed[0..1], &vec[0..1]); - assert_eq!((fixed[..]).len(), 2); - - fixed[1] = 3; - assert_eq!(fixed[1], 3); - } - - #[test] - fn length() { - let vec = vec![42; 5]; - let fixed: VariableList = VariableList::from(vec.clone()); - assert_eq!(&fixed[..], &vec[0..4]); - - let vec = vec![42; 3]; - let fixed: VariableList = VariableList::from(vec.clone()); - assert_eq!(&fixed[0..3], &vec[..]); - assert_eq!(&fixed[..], &vec![42, 42, 42][..]); - - let vec = vec![]; - let fixed: VariableList = VariableList::from(vec); - assert_eq!(&fixed[..], &[] as &[u64]); - } - - #[test] - fn deref() { - let vec = vec![0, 2, 4, 6]; - let fixed: VariableList = VariableList::from(vec); - - assert_eq!(fixed.first(), Some(&0)); - assert_eq!(fixed.get(3), Some(&6)); - 
assert_eq!(fixed.get(4), None); - } - - #[test] - fn encode() { - let vec: VariableList = vec![0; 2].into(); - assert_eq!(vec.as_ssz_bytes(), vec![0, 0, 0, 0]); - assert_eq!( as Encode>::ssz_fixed_len(), 4); - } - - fn round_trip(item: T) { - let encoded = &item.as_ssz_bytes(); - assert_eq!(item.ssz_bytes_len(), encoded.len()); - assert_eq!(T::from_ssz_bytes(encoded), Ok(item)); - } - - #[test] - fn u16_len_8() { - round_trip::>(vec![42; 8].into()); - round_trip::>(vec![0; 8].into()); - } - - fn root_with_length(bytes: &[u8], len: usize) -> Hash256 { - let root = merkle_root(bytes, 0); - tree_hash::mix_in_length(&root, len) - } - - #[test] - fn tree_hash_u8() { - let fixed: VariableList = VariableList::from(vec![]); - assert_eq!(fixed.tree_hash_root(), root_with_length(&[0; 8], 0)); - - for i in 0..=1 { - let fixed: VariableList = VariableList::from(vec![0; i]); - assert_eq!(fixed.tree_hash_root(), root_with_length(&vec![0; i], i)); - } - - for i in 0..=8 { - let fixed: VariableList = VariableList::from(vec![0; i]); - assert_eq!(fixed.tree_hash_root(), root_with_length(&vec![0; i], i)); - } - - for i in 0..=13 { - let fixed: VariableList = VariableList::from(vec![0; i]); - assert_eq!(fixed.tree_hash_root(), root_with_length(&vec![0; i], i)); - } - - for i in 0..=16 { - let fixed: VariableList = VariableList::from(vec![0; i]); - assert_eq!(fixed.tree_hash_root(), root_with_length(&vec![0; i], i)); - } - - let source: Vec = (0..16).collect(); - let fixed: VariableList = VariableList::from(source.clone()); - assert_eq!(fixed.tree_hash_root(), root_with_length(&source, 16)); - } - - #[derive(Clone, Copy, TreeHash, Default)] - struct A { - a: u32, - b: u32, - } - - fn repeat(input: &[u8], n: usize) -> Vec { - let mut output = vec![]; - - for _ in 0..n { - output.append(&mut input.to_vec()); - } - - output - } - - fn padded_root_with_length(bytes: &[u8], len: usize, min_nodes: usize) -> Hash256 { - let root = merkle_root(bytes, min_nodes); - 
tree_hash::mix_in_length(&root, len) - } - - #[test] - fn tree_hash_composite() { - let a = A { a: 0, b: 1 }; - - let fixed: VariableList = VariableList::from(vec![]); - assert_eq!( - fixed.tree_hash_root(), - padded_root_with_length(&[0; 32], 0, 0), - ); - - for i in 0..=1 { - let fixed: VariableList = VariableList::from(vec![a; i]); - assert_eq!( - fixed.tree_hash_root(), - padded_root_with_length(&repeat(a.tree_hash_root().as_bytes(), i), i, 1), - "U1 {}", - i - ); - } - - for i in 0..=8 { - let fixed: VariableList = VariableList::from(vec![a; i]); - assert_eq!( - fixed.tree_hash_root(), - padded_root_with_length(&repeat(a.tree_hash_root().as_bytes(), i), i, 8), - "U8 {}", - i - ); - } - - for i in 0..=13 { - let fixed: VariableList = VariableList::from(vec![a; i]); - assert_eq!( - fixed.tree_hash_root(), - padded_root_with_length(&repeat(a.tree_hash_root().as_bytes(), i), i, 13), - "U13 {}", - i - ); - } - - for i in 0..=16 { - let fixed: VariableList = VariableList::from(vec![a; i]); - assert_eq!( - fixed.tree_hash_root(), - padded_root_with_length(&repeat(a.tree_hash_root().as_bytes(), i), i, 16), - "U16 {}", - i - ); - } - } -} diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index ccb41830be8..c16742782c6 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -13,15 +13,15 @@ tokio = { version = "1.14.0", features = ["rt-multi-thread"] } bls = { path = "../../crypto/bls" } integer-sqrt = "0.1.5" itertools = "0.10.0" -eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.1" -eth2_ssz_types = "0.2.2" +ethereum_ssz = "0.5.0" +ethereum_ssz_derive = "0.5.0" +ssz_types = "0.5.0" merkle_proof = { path = "../merkle_proof" } safe_arith = { path = "../safe_arith" } -tree_hash = "0.4.1" +tree_hash = "0.5.0" types = { path = "../types", default-features = false } rayon = "1.4.1" -eth2_hashing = "0.3.0" +ethereum_hashing = "1.0.0-beta.2" int_to_bytes = { path = "../int_to_bytes" } smallvec = 
"1.6.1" arbitrary = { version = "1.0", features = ["derive"], optional = true } @@ -39,7 +39,7 @@ arbitrary-fuzz = [ "types/arbitrary-fuzz", "bls/arbitrary", "merkle_proof/arbitrary", - "eth2_ssz/arbitrary", - "eth2_ssz_types/arbitrary", + "ethereum_ssz/arbitrary", + "ssz_types/arbitrary", "tree_hash/arbitrary", ] diff --git a/consensus/state_processing/src/common/deposit_data_tree.rs b/consensus/state_processing/src/common/deposit_data_tree.rs index aaad96fbd53..2e86556b0fb 100644 --- a/consensus/state_processing/src/common/deposit_data_tree.rs +++ b/consensus/state_processing/src/common/deposit_data_tree.rs @@ -1,4 +1,4 @@ -use eth2_hashing::hash; +use ethereum_hashing::hash; use int_to_bytes::int_to_bytes32; use merkle_proof::{MerkleTree, MerkleTreeError}; use safe_arith::SafeArith; diff --git a/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs b/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs index bb26799250d..731a82aa951 100644 --- a/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs +++ b/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs @@ -1,7 +1,7 @@ use super::errors::{BlockOperationError, BlsExecutionChangeInvalid as Invalid}; use crate::per_block_processing::signature_sets::bls_execution_change_signature_set; use crate::VerifySignatures; -use eth2_hashing::hash; +use ethereum_hashing::hash; use types::*; type Result = std::result::Result>; diff --git a/consensus/swap_or_not_shuffle/Cargo.toml b/consensus/swap_or_not_shuffle/Cargo.toml index 9a7d58b77d9..303e5cfba17 100644 --- a/consensus/swap_or_not_shuffle/Cargo.toml +++ b/consensus/swap_or_not_shuffle/Cargo.toml @@ -12,7 +12,7 @@ harness = false criterion = "0.3.3" [dependencies] -eth2_hashing = "0.3.0" +ethereum_hashing = "1.0.0-beta.2" ethereum-types = "0.14.1" [features] diff --git a/consensus/swap_or_not_shuffle/src/compute_shuffled_index.rs 
b/consensus/swap_or_not_shuffle/src/compute_shuffled_index.rs index f43edfe8644..e71f3ca18e7 100644 --- a/consensus/swap_or_not_shuffle/src/compute_shuffled_index.rs +++ b/consensus/swap_or_not_shuffle/src/compute_shuffled_index.rs @@ -1,5 +1,5 @@ use crate::Hash256; -use eth2_hashing::{Context, Sha256Context}; +use ethereum_hashing::{Context, Sha256Context}; use std::cmp::max; /// Return `p(index)` in a pseudorandom permutation `p` of `0...list_size-1` with ``seed`` as entropy. diff --git a/consensus/swap_or_not_shuffle/src/shuffle_list.rs b/consensus/swap_or_not_shuffle/src/shuffle_list.rs index edc6dd6377c..2b9a2565547 100644 --- a/consensus/swap_or_not_shuffle/src/shuffle_list.rs +++ b/consensus/swap_or_not_shuffle/src/shuffle_list.rs @@ -1,5 +1,5 @@ use crate::Hash256; -use eth2_hashing::hash_fixed; +use ethereum_hashing::hash_fixed; use std::mem; const SEED_SIZE: usize = 32; diff --git a/consensus/tree_hash/Cargo.toml b/consensus/tree_hash/Cargo.toml deleted file mode 100644 index b2630d4bf60..00000000000 --- a/consensus/tree_hash/Cargo.toml +++ /dev/null @@ -1,23 +0,0 @@ -[package] -name = "tree_hash" -version = "0.4.1" -authors = ["Paul Hauner "] -edition = "2021" -license = "Apache-2.0" -description = "Efficient Merkle-hashing as used in Ethereum 2.0" - -[dev-dependencies] -rand = "0.8.5" -tree_hash_derive = "0.4.0" -types = { path = "../types" } -beacon_chain = { path = "../../beacon_node/beacon_chain" } -eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.1" - -[dependencies] -ethereum-types = "0.14.1" -eth2_hashing = "0.3.0" -smallvec = "1.6.1" - -[features] -arbitrary = ["ethereum-types/arbitrary"] diff --git a/consensus/tree_hash/examples/flamegraph_beacon_state.rs b/consensus/tree_hash/examples/flamegraph_beacon_state.rs deleted file mode 100644 index e5b505bb91c..00000000000 --- a/consensus/tree_hash/examples/flamegraph_beacon_state.rs +++ /dev/null @@ -1,50 +0,0 @@ -use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; -use 
types::{BeaconState, EthSpec, MainnetEthSpec}; - -const TREE_HASH_LOOPS: usize = 1_000; -const VALIDATOR_COUNT: usize = 1_000; - -fn get_harness() -> BeaconChainHarness> { - let harness = BeaconChainHarness::builder(T::default()) - .default_spec() - .deterministic_keypairs(VALIDATOR_COUNT) - .fresh_ephemeral_store() - .build(); - - harness.advance_slot(); - - harness -} - -fn build_state() -> BeaconState { - let state = get_harness::().chain.head_beacon_state_cloned(); - - assert_eq!(state.as_base().unwrap().validators.len(), VALIDATOR_COUNT); - assert_eq!(state.as_base().unwrap().balances.len(), VALIDATOR_COUNT); - assert!(state - .as_base() - .unwrap() - .previous_epoch_attestations - .is_empty()); - assert!(state - .as_base() - .unwrap() - .current_epoch_attestations - .is_empty()); - assert!(state.as_base().unwrap().eth1_data_votes.is_empty()); - assert!(state.as_base().unwrap().historical_roots.is_empty()); - - state -} - -fn main() { - let state = build_state::(); - - // This vec is an attempt to ensure the compiler doesn't optimize-out the hashing. - let mut vec = Vec::with_capacity(TREE_HASH_LOOPS); - - for _ in 0..TREE_HASH_LOOPS { - let root = state.canonical_root(); - vec.push(root[0]); - } -} diff --git a/consensus/tree_hash/src/impls.rs b/consensus/tree_hash/src/impls.rs deleted file mode 100644 index 899356f8331..00000000000 --- a/consensus/tree_hash/src/impls.rs +++ /dev/null @@ -1,222 +0,0 @@ -use super::*; -use ethereum_types::{H160, H256, U128, U256}; - -fn int_to_hash256(int: u64) -> Hash256 { - let mut bytes = [0; HASHSIZE]; - bytes[0..8].copy_from_slice(&int.to_le_bytes()); - Hash256::from_slice(&bytes) -} - -macro_rules! 
impl_for_bitsize { - ($type: ident, $bit_size: expr) => { - impl TreeHash for $type { - fn tree_hash_type() -> TreeHashType { - TreeHashType::Basic - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - PackedEncoding::from_slice(&self.to_le_bytes()) - } - - fn tree_hash_packing_factor() -> usize { - HASHSIZE / ($bit_size / 8) - } - - #[allow(clippy::cast_lossless)] // Lint does not apply to all uses of this macro. - fn tree_hash_root(&self) -> Hash256 { - int_to_hash256(*self as u64) - } - } - }; -} - -impl_for_bitsize!(u8, 8); -impl_for_bitsize!(u16, 16); -impl_for_bitsize!(u32, 32); -impl_for_bitsize!(u64, 64); -impl_for_bitsize!(usize, 64); - -impl TreeHash for bool { - fn tree_hash_type() -> TreeHashType { - TreeHashType::Basic - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - (*self as u8).tree_hash_packed_encoding() - } - - fn tree_hash_packing_factor() -> usize { - u8::tree_hash_packing_factor() - } - - fn tree_hash_root(&self) -> Hash256 { - int_to_hash256(*self as u64) - } -} - -/// Only valid for byte types less than 32 bytes. -macro_rules! 
impl_for_lt_32byte_u8_array { - ($len: expr) => { - impl TreeHash for [u8; $len] { - fn tree_hash_type() -> TreeHashType { - TreeHashType::Vector - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - unreachable!("bytesN should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("bytesN should never be packed.") - } - - fn tree_hash_root(&self) -> Hash256 { - let mut result = [0; 32]; - result[0..$len].copy_from_slice(&self[..]); - Hash256::from_slice(&result) - } - } - }; -} - -impl_for_lt_32byte_u8_array!(4); -impl_for_lt_32byte_u8_array!(32); - -impl TreeHash for [u8; 48] { - fn tree_hash_type() -> TreeHashType { - TreeHashType::Vector - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - unreachable!("Vector should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("Vector should never be packed.") - } - - fn tree_hash_root(&self) -> Hash256 { - let values_per_chunk = BYTES_PER_CHUNK; - let minimum_chunk_count = (48 + values_per_chunk - 1) / values_per_chunk; - merkle_root(self, minimum_chunk_count) - } -} - -impl TreeHash for U128 { - fn tree_hash_type() -> TreeHashType { - TreeHashType::Basic - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - let mut result = [0; 16]; - self.to_little_endian(&mut result); - PackedEncoding::from_slice(&result) - } - - fn tree_hash_packing_factor() -> usize { - 2 - } - - fn tree_hash_root(&self) -> Hash256 { - let mut result = [0; HASHSIZE]; - self.to_little_endian(&mut result[0..16]); - Hash256::from_slice(&result) - } -} - -impl TreeHash for U256 { - fn tree_hash_type() -> TreeHashType { - TreeHashType::Basic - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - let mut result = [0; 32]; - self.to_little_endian(&mut result); - PackedEncoding::from_slice(&result) - } - - fn tree_hash_packing_factor() -> usize { - 1 - } - - fn tree_hash_root(&self) -> Hash256 { - let mut result = [0; 32]; - 
self.to_little_endian(&mut result[..]); - Hash256::from_slice(&result) - } -} - -impl TreeHash for H160 { - fn tree_hash_type() -> TreeHashType { - TreeHashType::Vector - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - let mut result = [0; 32]; - result[0..20].copy_from_slice(self.as_bytes()); - PackedEncoding::from_slice(&result) - } - - fn tree_hash_packing_factor() -> usize { - 1 - } - - fn tree_hash_root(&self) -> Hash256 { - let mut result = [0; 32]; - result[0..20].copy_from_slice(self.as_bytes()); - Hash256::from_slice(&result) - } -} - -impl TreeHash for H256 { - fn tree_hash_type() -> TreeHashType { - TreeHashType::Vector - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - PackedEncoding::from_slice(self.as_bytes()) - } - - fn tree_hash_packing_factor() -> usize { - 1 - } - - fn tree_hash_root(&self) -> Hash256 { - *self - } -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn bool() { - let mut true_bytes: Vec = vec![1]; - true_bytes.append(&mut vec![0; 31]); - - let false_bytes: Vec = vec![0; 32]; - - assert_eq!(true.tree_hash_root().as_bytes(), true_bytes.as_slice()); - assert_eq!(false.tree_hash_root().as_bytes(), false_bytes.as_slice()); - } - - #[test] - fn int_to_bytes() { - assert_eq!(int_to_hash256(0).as_bytes(), &[0; 32]); - assert_eq!( - int_to_hash256(1).as_bytes(), - &[ - 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0 - ] - ); - assert_eq!( - int_to_hash256(u64::max_value()).as_bytes(), - &[ - 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 - ] - ); - } -} diff --git a/consensus/tree_hash/src/merkle_hasher.rs b/consensus/tree_hash/src/merkle_hasher.rs deleted file mode 100644 index 2acaf1c3b8f..00000000000 --- a/consensus/tree_hash/src/merkle_hasher.rs +++ /dev/null @@ -1,573 +0,0 @@ -use crate::{get_zero_hash, Hash256, HASHSIZE}; -use eth2_hashing::{Context, Sha256Context, HASH_LEN}; -use 
smallvec::{smallvec, SmallVec}; -use std::mem; - -type SmallVec8 = SmallVec<[T; 8]>; - -#[derive(Clone, Debug, PartialEq)] -pub enum Error { - /// The maximum number of leaves defined by the initialization `depth` has been exceed. - MaximumLeavesExceeded { max_leaves: usize }, -} - -/// Helper struct to store either a hash digest or a slice. -/// -/// Should be used as a left or right value for some node. -enum Preimage<'a> { - Digest([u8; HASH_LEN]), - Slice(&'a [u8]), -} - -impl<'a> Preimage<'a> { - /// Returns a 32-byte slice. - fn as_bytes(&self) -> &[u8] { - match self { - Preimage::Digest(digest) => digest.as_ref(), - Preimage::Slice(slice) => slice, - } - } -} - -/// A node that has had a left child supplied, but not a right child. -struct HalfNode { - /// The hasher context. - context: Context, - /// The tree id of the node. The root node has in id of `1` and ids increase moving down the - /// tree from left to right. - id: usize, -} - -impl HalfNode { - /// Create a new half-node from the given `left` value. - fn new(id: usize, left: Preimage) -> Self { - let mut context = Context::new(); - context.update(left.as_bytes()); - - Self { context, id } - } - - /// Complete the half-node by providing a `right` value. Returns a digest of the left and right - /// nodes. - fn finish(mut self, right: Preimage) -> [u8; HASH_LEN] { - self.context.update(right.as_bytes()); - self.context.finalize() - } -} - -/// Provides a Merkle-root hasher that allows for streaming bytes (i.e., providing any-length byte -/// slices without need to separate into leaves). Efficiently handles cases where not all leaves -/// have been provided by assuming all non-provided leaves are `[0; 32]` and pre-computing the -/// zero-value hashes at all depths of the tree. -/// -/// This algorithm aims to allocate as little memory as possible and it does this by "folding" up -/// the tree as each leaf is provided. 
Consider this step-by-step functional diagram of hashing a -/// tree with depth three: -/// -/// ## Functional Diagram -/// -/// Nodes that are `-` have not been defined and do not occupy memory. Nodes that are `L` are -/// leaves that are provided but are not stored. Nodes that have integers (`1`, `2`) are stored in -/// our struct. Finally, nodes that are `X` were stored, but are now removed. -/// -/// ### Start -/// -/// ```ignore -/// - -/// / \ -/// - - -/// / \ / \ -/// - - - - -/// ``` -/// -/// ### Provide first leaf -/// -/// ```ignore -/// - -/// / \ -/// 2 - -/// / \ / \ -/// L - - - -/// ``` -/// -/// ### Provide second leaf -/// -/// ```ignore -/// 1 -/// / \ -/// X - -/// / \ / \ -/// L L - - -/// ``` -/// -/// ### Provide third leaf -/// -/// ```ignore -/// 1 -/// / \ -/// X 3 -/// / \ / \ -/// L L L - -/// ``` -/// -/// ### Provide fourth and final leaf -/// -/// ```ignore -/// 1 -/// / \ -/// X X -/// / \ / \ -/// L L L L -/// ``` -/// -pub struct MerkleHasher { - /// Stores the nodes that are half-complete and awaiting a right node. - /// - /// A smallvec of size 8 means we can hash a tree with 256 leaves without allocating on the - /// heap. Each half-node is 232 bytes, so this smallvec may store 1856 bytes on the stack. - half_nodes: SmallVec8, - /// The depth of the tree that will be produced. - /// - /// Depth is counted top-down (i.e., the root node is at depth 0). A tree with 1 leaf has a - /// depth of 1, a tree with 4 leaves has a depth of 3. - depth: usize, - /// The next leaf that we are expecting to process. - next_leaf: usize, - /// A buffer of bytes that are waiting to be written to a leaf. - buffer: SmallVec<[u8; 32]>, - /// Set to Some(root) when the root of the tree is known. - root: Option, -} - -/// Returns the parent of node with id `i`. -fn get_parent(i: usize) -> usize { - i / 2 -} - -/// Gets the depth of a node with an id of `i`. -/// -/// It is a logic error to provide `i == 0`. -/// -/// E.g., if `i` is 1, depth is 0. 
If `i` is is 1, depth is 1. -fn get_depth(i: usize) -> usize { - let total_bits = mem::size_of::() * 8; - total_bits - i.leading_zeros() as usize - 1 -} - -impl MerkleHasher { - /// Instantiate a hasher for a tree with a given number of leaves. - /// - /// `num_leaves` will be rounded to the next power of two. E.g., if `num_leaves == 6`, then the - /// tree will _actually_ be able to accomodate 8 leaves and the resulting hasher is exactly the - /// same as one that was instantiated with `Self::with_leaves(8)`. - /// - /// ## Notes - /// - /// If `num_leaves == 0`, a tree of depth 1 will be created. If no leaves are provided it will - /// return a root of `[0; 32]`. - pub fn with_leaves(num_leaves: usize) -> Self { - let depth = get_depth(num_leaves.next_power_of_two()) + 1; - Self::with_depth(depth) - } - - /// Instantiates a new, empty hasher for a tree with `depth` layers which will have capacity - /// for `1 << (depth - 1)` leaf nodes. - /// - /// It is not possible to grow the depth of the tree after instantiation. - /// - /// ## Panics - /// - /// Panics if `depth == 0`. - fn with_depth(depth: usize) -> Self { - assert!(depth > 0, "merkle tree cannot have a depth of zero"); - - Self { - half_nodes: SmallVec::with_capacity(depth - 1), - depth, - next_leaf: 1 << (depth - 1), - buffer: SmallVec::with_capacity(32), - root: None, - } - } - - /// Write some bytes to the hasher. - /// - /// ## Errors - /// - /// Returns an error if the given bytes would create a leaf that would exceed the maximum - /// permissible number of leaves defined by the initialization `depth`. E.g., a tree of `depth - /// == 2` can only accept 2 leaves. A tree of `depth == 14` can only accept 8,192 leaves. 
- pub fn write(&mut self, bytes: &[u8]) -> Result<(), Error> { - let mut ptr = 0; - while ptr <= bytes.len() { - let slice = &bytes[ptr..std::cmp::min(bytes.len(), ptr + HASHSIZE)]; - - if self.buffer.is_empty() && slice.len() == HASHSIZE { - self.process_leaf(slice)?; - ptr += HASHSIZE - } else if self.buffer.len() + slice.len() < HASHSIZE { - self.buffer.extend_from_slice(slice); - ptr += HASHSIZE - } else { - let buf_len = self.buffer.len(); - let required = HASHSIZE - buf_len; - - let mut leaf = [0; HASHSIZE]; - leaf[..buf_len].copy_from_slice(&self.buffer); - leaf[buf_len..].copy_from_slice(&slice[0..required]); - - self.process_leaf(&leaf)?; - self.buffer = smallvec![]; - - ptr += required - } - } - - Ok(()) - } - - /// Process the next leaf in the tree. - /// - /// ## Errors - /// - /// Returns an error if the given leaf would exceed the maximum permissible number of leaves - /// defined by the initialization `depth`. E.g., a tree of `depth == 2` can only accept 2 - /// leaves. A tree of `depth == 14` can only accept 8,192 leaves. - fn process_leaf(&mut self, leaf: &[u8]) -> Result<(), Error> { - assert_eq!(leaf.len(), HASHSIZE, "a leaf must be 32 bytes"); - - let max_leaves = 1 << (self.depth + 1); - - if self.next_leaf > max_leaves { - return Err(Error::MaximumLeavesExceeded { max_leaves }); - } else if self.next_leaf == 1 { - // A tree of depth one has a root that is equal to the first given leaf. - self.root = Some(Hash256::from_slice(leaf)) - } else if self.next_leaf % 2 == 0 { - self.process_left_node(self.next_leaf, Preimage::Slice(leaf)) - } else { - self.process_right_node(self.next_leaf, Preimage::Slice(leaf)) - } - - self.next_leaf += 1; - - Ok(()) - } - - /// Returns the root of the Merkle tree. - /// - /// If not all leaves have been provided, the tree will be efficiently completed under the - /// assumption that all not-yet-provided leaves are equal to `[0; 32]`. 
- /// - /// ## Errors - /// - /// Returns an error if the bytes remaining in the buffer would create a leaf that would exceed - /// the maximum permissible number of leaves defined by the initialization `depth`. - pub fn finish(mut self) -> Result { - if !self.buffer.is_empty() { - let mut leaf = [0; HASHSIZE]; - leaf[..self.buffer.len()].copy_from_slice(&self.buffer); - self.process_leaf(&leaf)? - } - - // If the tree is incomplete, we must complete it by providing zero-hashes. - loop { - if let Some(root) = self.root { - break Ok(root); - } else if let Some(node) = self.half_nodes.last() { - let right_child = node.id * 2 + 1; - self.process_right_node(right_child, self.zero_hash(right_child)); - } else if self.next_leaf == 1 { - // The next_leaf can only be 1 if the tree has a depth of one. If have been no - // leaves supplied, assume a root of zero. - break Ok(Hash256::zero()); - } else { - // The only scenario where there are (a) no half nodes and (b) a tree of depth - // two or more is where no leaves have been supplied at all. - // - // Once we supply this first zero-hash leaf then all future operations will be - // triggered via the `process_right_node` branch. - self.process_left_node(self.next_leaf, self.zero_hash(self.next_leaf)) - } - } - } - - /// Process a node that will become the left-hand node of some parent. The supplied `id` is - /// that of the node (not the parent). The `preimage` is the value of the node (i.e., if this - /// is a leaf node it will be the value of that leaf). - /// - /// In this scenario, the only option is to push a new half-node. - fn process_left_node(&mut self, id: usize, preimage: Preimage) { - self.half_nodes - .push(HalfNode::new(get_parent(id), preimage)) - } - - /// Process a node that will become the right-hand node of some parent. The supplied `id` is - /// that of the node (not the parent). The `preimage` is the value of the node (i.e., if this - /// is a leaf node it will be the value of that leaf). 
- /// - /// This operation will always complete one node, then it will attempt to crawl up the tree and - /// collapse all other completed nodes. For example, consider a tree of depth 3 (see diagram - /// below). When providing the node with id `7`, the node with id `3` will be completed which - /// will also provide the right-node for the `1` node. This function will complete both of - /// those nodes and ultimately find the root of the tree. - /// - /// ```ignore - /// 1 <-- completed - /// / \ - /// 2 3 <-- completed - /// / \ / \ - /// 4 5 6 7 <-- supplied right node - /// ``` - fn process_right_node(&mut self, id: usize, mut preimage: Preimage) { - let mut parent = get_parent(id); - - loop { - match self.half_nodes.last() { - Some(node) if node.id == parent => { - preimage = Preimage::Digest( - self.half_nodes - .pop() - .expect("if .last() is Some then .pop() must succeed") - .finish(preimage), - ); - if parent == 1 { - self.root = Some(Hash256::from_slice(preimage.as_bytes())); - break; - } else { - parent = get_parent(parent); - } - } - _ => { - self.half_nodes.push(HalfNode::new(parent, preimage)); - break; - } - } - } - } - - /// Returns a "zero hash" from a pre-computed set for the given node. - /// - /// Note: this node is not always zero, instead it is the result of hashing up a tree where the - /// leaves are all zeros. E.g., in a tree of depth 2, the `zero_hash` of a node at depth 1 - /// will be `[0; 32]`. However, the `zero_hash` for a node at depth 0 will be - /// `hash(concat([0; 32], [0; 32])))`. - fn zero_hash(&self, id: usize) -> Preimage<'static> { - Preimage::Slice(get_zero_hash(self.depth - (get_depth(id) + 1))) - } -} - -#[cfg(test)] -mod test { - use super::*; - use crate::merkleize_padded; - - /// This test is just to ensure that the stack size of the `Context` remains the same. We choose - /// our smallvec size based upon this, so it's good to know if it suddenly changes in size. 
- #[test] - fn context_size() { - assert_eq!( - mem::size_of::(), - 224, - "Halfnode size should be as expected" - ); - } - - fn compare_with_reference(leaves: &[Hash256], depth: usize) { - let reference_bytes = leaves - .iter() - .flat_map(|hash| hash.as_bytes()) - .copied() - .collect::>(); - - let reference_root = merkleize_padded(&reference_bytes, 1 << (depth - 1)); - - let merklizer_root_32_bytes = { - let mut m = MerkleHasher::with_depth(depth); - for leaf in leaves.iter() { - m.write(leaf.as_bytes()).expect("should process leaf"); - } - m.finish().expect("should finish") - }; - - assert_eq!( - reference_root, merklizer_root_32_bytes, - "32 bytes should match reference root" - ); - - let merklizer_root_individual_3_bytes = { - let mut m = MerkleHasher::with_depth(depth); - for bytes in reference_bytes.chunks(3) { - m.write(bytes).expect("should process byte"); - } - m.finish().expect("should finish") - }; - - assert_eq!( - reference_root, merklizer_root_individual_3_bytes, - "3 bytes should match reference root" - ); - - let merklizer_root_individual_single_bytes = { - let mut m = MerkleHasher::with_depth(depth); - for byte in reference_bytes.iter() { - m.write(&[*byte]).expect("should process byte"); - } - m.finish().expect("should finish") - }; - - assert_eq!( - reference_root, merklizer_root_individual_single_bytes, - "single bytes should match reference root" - ); - } - - /// A simple wrapper to compare MerkleHasher to the reference function by just giving a number - /// of leaves and a depth. - fn compare_reference_with_len(leaves: u64, depth: usize) { - let leaves = (0..leaves) - .map(Hash256::from_low_u64_be) - .collect::>(); - compare_with_reference(&leaves, depth) - } - - /// Compares the `MerkleHasher::with_depth` and `MerkleHasher::with_leaves` generate consistent - /// results. 
- fn compare_new_with_leaf_count(num_leaves: u64, depth: usize) { - let leaves = (0..num_leaves) - .map(Hash256::from_low_u64_be) - .collect::>(); - - let from_depth = { - let mut m = MerkleHasher::with_depth(depth); - for leaf in leaves.iter() { - m.write(leaf.as_bytes()).expect("should process leaf"); - } - m.finish() - }; - - let from_num_leaves = { - let mut m = MerkleHasher::with_leaves(num_leaves as usize); - for leaf in leaves.iter() { - m.process_leaf(leaf.as_bytes()) - .expect("should process leaf"); - } - m.finish() - }; - - assert_eq!( - from_depth, from_num_leaves, - "hash generated by depth should match that from num leaves" - ); - } - - #[test] - fn with_leaves() { - compare_new_with_leaf_count(1, 1); - compare_new_with_leaf_count(2, 2); - compare_new_with_leaf_count(3, 3); - compare_new_with_leaf_count(4, 3); - compare_new_with_leaf_count(5, 4); - compare_new_with_leaf_count(6, 4); - compare_new_with_leaf_count(7, 4); - compare_new_with_leaf_count(8, 4); - compare_new_with_leaf_count(9, 5); - compare_new_with_leaf_count(10, 5); - compare_new_with_leaf_count(11, 5); - compare_new_with_leaf_count(12, 5); - compare_new_with_leaf_count(13, 5); - compare_new_with_leaf_count(14, 5); - compare_new_with_leaf_count(15, 5); - } - - #[test] - fn depth() { - assert_eq!(get_depth(1), 0); - assert_eq!(get_depth(2), 1); - assert_eq!(get_depth(3), 1); - assert_eq!(get_depth(4), 2); - assert_eq!(get_depth(5), 2); - assert_eq!(get_depth(6), 2); - assert_eq!(get_depth(7), 2); - assert_eq!(get_depth(8), 3); - } - - #[test] - fn with_0_leaves() { - let hasher = MerkleHasher::with_leaves(0); - assert_eq!(hasher.finish().unwrap(), Hash256::zero()); - } - - #[test] - #[should_panic] - fn too_many_leaves() { - compare_reference_with_len(2, 1); - } - - #[test] - fn full_trees() { - compare_reference_with_len(1, 1); - compare_reference_with_len(2, 2); - compare_reference_with_len(4, 3); - compare_reference_with_len(8, 4); - compare_reference_with_len(16, 5); - 
compare_reference_with_len(32, 6); - compare_reference_with_len(64, 7); - compare_reference_with_len(128, 8); - compare_reference_with_len(256, 9); - compare_reference_with_len(256, 9); - compare_reference_with_len(8192, 14); - } - - #[test] - fn incomplete_trees() { - compare_reference_with_len(0, 1); - - compare_reference_with_len(0, 2); - compare_reference_with_len(1, 2); - - for i in 0..=4 { - compare_reference_with_len(i, 3); - } - - for i in 0..=7 { - compare_reference_with_len(i, 4); - } - - for i in 0..=15 { - compare_reference_with_len(i, 5); - } - - for i in 0..=32 { - compare_reference_with_len(i, 6); - } - - for i in 0..=64 { - compare_reference_with_len(i, 7); - } - - compare_reference_with_len(0, 14); - compare_reference_with_len(13, 14); - compare_reference_with_len(8191, 14); - } - - #[test] - fn remaining_buffer() { - let a = { - let mut m = MerkleHasher::with_leaves(2); - m.write(&[1]).expect("should write"); - m.finish().expect("should finish") - }; - - let b = { - let mut m = MerkleHasher::with_leaves(2); - let mut leaf = vec![1]; - leaf.extend_from_slice(&[0; 31]); - m.write(&leaf).expect("should write"); - m.write(&[0; 32]).expect("should write"); - m.finish().expect("should finish") - }; - - assert_eq!(a, b, "should complete buffer"); - } -} diff --git a/consensus/tree_hash/src/merkleize_padded.rs b/consensus/tree_hash/src/merkleize_padded.rs deleted file mode 100644 index f7dce399497..00000000000 --- a/consensus/tree_hash/src/merkleize_padded.rs +++ /dev/null @@ -1,330 +0,0 @@ -use super::{get_zero_hash, Hash256, BYTES_PER_CHUNK}; -use eth2_hashing::{hash32_concat, hash_fixed}; - -/// Merkleize `bytes` and return the root, optionally padding the tree out to `min_leaves` number of -/// leaves. -/// -/// **Note**: This function is generally worse than using the `crate::merkle_root` which uses -/// `MerkleHasher`. We only keep this function around for reference testing. 
-/// -/// First all nodes are extracted from `bytes` and then a padding node is added until the number of -/// leaf chunks is greater than or equal to `min_leaves`. Callers may set `min_leaves` to `0` if no -/// adding additional chunks should be added to the given `bytes`. -/// -/// If `bytes.len() <= BYTES_PER_CHUNK`, no hashing is done and `bytes` is returned, potentially -/// padded out to `BYTES_PER_CHUNK` length with `0`. -/// -/// ## CPU Performance -/// -/// A cache of `MAX_TREE_DEPTH` hashes are stored to avoid re-computing the hashes of padding nodes -/// (or their parents). Therefore, adding padding nodes only incurs one more hash per additional -/// height of the tree. -/// -/// ## Memory Performance -/// -/// This algorithm has two interesting memory usage properties: -/// -/// 1. The maximum memory footprint is roughly `O(V / 2)` memory, where `V` is the number of leaf -/// chunks with values (i.e., leaves that are not padding). The means adding padding nodes to -/// the tree does not increase the memory footprint. -/// 2. At each height of the tree half of the memory is freed until only a single chunk is stored. -/// 3. The input `bytes` are not copied into another list before processing. -/// -/// _Note: there are some minor memory overheads, including a handful of usizes and a list of -/// `MAX_TREE_DEPTH` hashes as `lazy_static` constants._ -pub fn merkleize_padded(bytes: &[u8], min_leaves: usize) -> Hash256 { - // If the bytes are just one chunk or less, pad to one chunk and return without hashing. - if bytes.len() <= BYTES_PER_CHUNK && min_leaves <= 1 { - let mut o = bytes.to_vec(); - o.resize(BYTES_PER_CHUNK, 0); - return Hash256::from_slice(&o); - } - - assert!( - bytes.len() > BYTES_PER_CHUNK || min_leaves > 1, - "Merkle hashing only needs to happen if there is more than one chunk" - ); - - // The number of leaves that can be made directly from `bytes`. 
- let leaves_with_values = (bytes.len() + (BYTES_PER_CHUNK - 1)) / BYTES_PER_CHUNK; - - // The number of parents that have at least one non-padding leaf. - // - // Since there is more than one node in this tree (see prior assertion), there should always be - // one or more initial parent nodes. - let initial_parents_with_values = std::cmp::max(1, next_even_number(leaves_with_values) / 2); - - // The number of leaves in the full tree (including padding nodes). - let num_leaves = std::cmp::max(leaves_with_values, min_leaves).next_power_of_two(); - - // The number of levels in the tree. - // - // A tree with a single node has `height == 1`. - let height = num_leaves.trailing_zeros() as usize + 1; - - assert!(height >= 2, "The tree should have two or more heights"); - - // A buffer/scratch-space used for storing each round of hashes at each height. - // - // This buffer is kept as small as possible; it will shrink so it never stores a padding node. - let mut chunks = ChunkStore::with_capacity(initial_parents_with_values); - - // Create a parent in the `chunks` buffer for every two chunks in `bytes`. - // - // I.e., do the first round of hashing, hashing from the `bytes` slice and filling the `chunks` - // struct. - for i in 0..initial_parents_with_values { - let start = i * BYTES_PER_CHUNK * 2; - - // Hash two chunks, creating a parent chunk. - let hash = match bytes.get(start..start + BYTES_PER_CHUNK * 2) { - // All bytes are available, hash as usual. - Some(slice) => hash_fixed(slice), - // Unable to get all the bytes, get a small slice and pad it out. - None => { - let mut preimage = bytes - .get(start..) - .expect("`i` can only be larger than zero if there are bytes to read") - .to_vec(); - preimage.resize(BYTES_PER_CHUNK * 2, 0); - hash_fixed(&preimage) - } - }; - - assert_eq!( - hash.len(), - BYTES_PER_CHUNK, - "Hashes should be exactly one chunk" - ); - - // Store the parent node. 
- chunks - .set(i, &hash) - .expect("Buffer should always have capacity for parent nodes") - } - - // Iterate through all heights above the leaf nodes and either (a) hash two children or, (b) - // hash a left child and a right padding node. - // - // Skip the 0'th height because the leaves have already been processed. Skip the highest-height - // in the tree as it is the root does not require hashing. - // - // The padding nodes for each height are cached via `lazy static` to simulate non-adjacent - // padding nodes (i.e., avoid doing unnecessary hashing). - for height in 1..height - 1 { - let child_nodes = chunks.len(); - let parent_nodes = next_even_number(child_nodes) / 2; - - // For each pair of nodes stored in `chunks`: - // - // - If two nodes are available, hash them to form a parent. - // - If one node is available, hash it and a cached padding node to form a parent. - for i in 0..parent_nodes { - let (left, right) = match (chunks.get(i * 2), chunks.get(i * 2 + 1)) { - (Ok(left), Ok(right)) => (left, right), - (Ok(left), Err(_)) => (left, get_zero_hash(height)), - // Deriving `parent_nodes` from `chunks.len()` has ensured that we never encounter the - // scenario where we expect two nodes but there are none. - (Err(_), Err(_)) => unreachable!("Parent must have one child"), - // `chunks` is a contiguous array so it is impossible for an index to be missing - // when a higher index is present. - (Err(_), Ok(_)) => unreachable!("Parent must have a left child"), - }; - - assert!( - left.len() == right.len() && right.len() == BYTES_PER_CHUNK, - "Both children should be `BYTES_PER_CHUNK` bytes." - ); - - let hash = hash32_concat(left, right); - - // Store a parent node. - chunks - .set(i, &hash) - .expect("Buf is adequate size for parent"); - } - - // Shrink the buffer so it neatly fits the number of new nodes created in this round. - // - // The number of `parent_nodes` is either decreasing or stable. It never increases. 
- chunks.truncate(parent_nodes); - } - - // There should be a single chunk left in the buffer and it is the Merkle root. - let root = chunks.into_vec(); - - assert_eq!(root.len(), BYTES_PER_CHUNK, "Only one chunk should remain"); - - Hash256::from_slice(&root) -} - -/// A helper struct for storing words of `BYTES_PER_CHUNK` size in a flat byte array. -#[derive(Debug)] -struct ChunkStore(Vec); - -impl ChunkStore { - /// Creates a new instance with `chunks` padding nodes. - fn with_capacity(chunks: usize) -> Self { - Self(vec![0; chunks * BYTES_PER_CHUNK]) - } - - /// Set the `i`th chunk to `value`. - /// - /// Returns `Err` if `value.len() != BYTES_PER_CHUNK` or `i` is out-of-bounds. - fn set(&mut self, i: usize, value: &[u8]) -> Result<(), ()> { - if i < self.len() && value.len() == BYTES_PER_CHUNK { - let slice = &mut self.0[i * BYTES_PER_CHUNK..i * BYTES_PER_CHUNK + BYTES_PER_CHUNK]; - slice.copy_from_slice(value); - Ok(()) - } else { - Err(()) - } - } - - /// Gets the `i`th chunk. - /// - /// Returns `Err` if `i` is out-of-bounds. - fn get(&self, i: usize) -> Result<&[u8], ()> { - if i < self.len() { - Ok(&self.0[i * BYTES_PER_CHUNK..i * BYTES_PER_CHUNK + BYTES_PER_CHUNK]) - } else { - Err(()) - } - } - - /// Returns the number of chunks presently stored in `self`. - fn len(&self) -> usize { - self.0.len() / BYTES_PER_CHUNK - } - - /// Truncates 'self' to `num_chunks` chunks. - /// - /// Functionally identical to `Vec::truncate`. - fn truncate(&mut self, num_chunks: usize) { - self.0.truncate(num_chunks * BYTES_PER_CHUNK) - } - - /// Consumes `self`, returning the underlying byte array. - fn into_vec(self) -> Vec { - self.0 - } -} - -/// Returns the next even number following `n`. If `n` is even, `n` is returned. -fn next_even_number(n: usize) -> usize { - n + n % 2 -} - -#[cfg(test)] -mod test { - use super::*; - use crate::ZERO_HASHES_MAX_INDEX; - - pub fn reference_root(bytes: &[u8]) -> Hash256 { - crate::merkleize_standard(bytes) - } - - macro_rules! 
common_tests { - ($get_bytes: ident) => { - #[test] - fn zero_value_0_nodes() { - test_against_reference(&$get_bytes(0 * BYTES_PER_CHUNK), 0); - } - - #[test] - fn zero_value_1_nodes() { - test_against_reference(&$get_bytes(1 * BYTES_PER_CHUNK), 0); - } - - #[test] - fn zero_value_2_nodes() { - test_against_reference(&$get_bytes(2 * BYTES_PER_CHUNK), 0); - } - - #[test] - fn zero_value_3_nodes() { - test_against_reference(&$get_bytes(3 * BYTES_PER_CHUNK), 0); - } - - #[test] - fn zero_value_4_nodes() { - test_against_reference(&$get_bytes(4 * BYTES_PER_CHUNK), 0); - } - - #[test] - fn zero_value_8_nodes() { - test_against_reference(&$get_bytes(8 * BYTES_PER_CHUNK), 0); - } - - #[test] - fn zero_value_9_nodes() { - test_against_reference(&$get_bytes(9 * BYTES_PER_CHUNK), 0); - } - - #[test] - fn zero_value_8_nodes_varying_min_length() { - for i in 0..64 { - test_against_reference(&$get_bytes(8 * BYTES_PER_CHUNK), i); - } - } - - #[test] - fn zero_value_range_of_nodes() { - for i in 0..32 * BYTES_PER_CHUNK { - test_against_reference(&$get_bytes(i), 0); - } - } - - #[test] - fn max_tree_depth_min_nodes() { - let input = vec![0; 10 * BYTES_PER_CHUNK]; - let min_nodes = 2usize.pow(ZERO_HASHES_MAX_INDEX as u32); - assert_eq!( - merkleize_padded(&input, min_nodes).as_bytes(), - get_zero_hash(ZERO_HASHES_MAX_INDEX) - ); - } - }; - } - - mod zero_value { - use super::*; - - fn zero_bytes(bytes: usize) -> Vec { - vec![0; bytes] - } - - common_tests!(zero_bytes); - } - - mod random_value { - use super::*; - use rand::RngCore; - - fn random_bytes(bytes: usize) -> Vec { - let mut bytes = Vec::with_capacity(bytes); - rand::thread_rng().fill_bytes(&mut bytes); - bytes - } - - common_tests!(random_bytes); - } - - fn test_against_reference(input: &[u8], min_nodes: usize) { - let mut reference_input = input.to_vec(); - reference_input.resize( - std::cmp::max( - reference_input.len(), - min_nodes.next_power_of_two() * BYTES_PER_CHUNK, - ), - 0, - ); - - assert_eq!( - 
reference_root(&reference_input), - merkleize_padded(input, min_nodes), - "input.len(): {:?}", - input.len() - ); - } -} diff --git a/consensus/tree_hash/src/merkleize_standard.rs b/consensus/tree_hash/src/merkleize_standard.rs deleted file mode 100644 index 6dd046991ed..00000000000 --- a/consensus/tree_hash/src/merkleize_standard.rs +++ /dev/null @@ -1,81 +0,0 @@ -use super::*; -use eth2_hashing::hash; - -/// Merkleizes bytes and returns the root, using a simple algorithm that does not optimize to avoid -/// processing or storing padding bytes. -/// -/// **Note**: This function is generally worse than using the `crate::merkle_root` which uses -/// `MerkleHasher`. We only keep this function around for reference testing. -/// -/// The input `bytes` will be padded to ensure that the number of leaves is a power-of-two. -/// -/// ## CPU Performance -/// -/// Will hash all nodes in the tree, even if they are padding and pre-determined. -/// -/// ## Memory Performance -/// -/// - Duplicates the input `bytes`. -/// - Stores all internal nodes, even if they are padding. -/// - Does not free up unused memory during operation. -pub fn merkleize_standard(bytes: &[u8]) -> Hash256 { - // If the bytes are just one chunk (or less than one chunk) just return them. 
- if bytes.len() <= HASHSIZE { - let mut o = bytes.to_vec(); - o.resize(HASHSIZE, 0); - return Hash256::from_slice(&o[0..HASHSIZE]); - } - - let leaves = num_sanitized_leaves(bytes.len()); - let nodes = num_nodes(leaves); - let internal_nodes = nodes - leaves; - - let num_bytes = std::cmp::max(internal_nodes, 1) * HASHSIZE + bytes.len(); - - let mut o: Vec = vec![0; internal_nodes * HASHSIZE]; - - o.append(&mut bytes.to_vec()); - - assert_eq!(o.len(), num_bytes); - - let empty_chunk_hash = hash(&[0; MERKLE_HASH_CHUNK]); - - let mut i = nodes * HASHSIZE; - let mut j = internal_nodes * HASHSIZE; - - while i >= MERKLE_HASH_CHUNK { - i -= MERKLE_HASH_CHUNK; - - j -= HASHSIZE; - let hash = match o.get(i..i + MERKLE_HASH_CHUNK) { - // All bytes are available, hash as usual. - Some(slice) => hash(slice), - // Unable to get all the bytes. - None => { - match o.get(i..) { - // Able to get some of the bytes, pad them out. - Some(slice) => { - let mut bytes = slice.to_vec(); - bytes.resize(MERKLE_HASH_CHUNK, 0); - hash(&bytes) - } - // Unable to get any bytes, use the empty-chunk hash. 
- None => empty_chunk_hash.clone(), - } - } - }; - - o[j..j + HASHSIZE].copy_from_slice(&hash); - } - - Hash256::from_slice(&o[0..HASHSIZE]) -} - -fn num_sanitized_leaves(num_bytes: usize) -> usize { - let leaves = (num_bytes + HASHSIZE - 1) / HASHSIZE; - leaves.next_power_of_two() -} - -fn num_nodes(num_leaves: usize) -> usize { - 2 * num_leaves - 1 -} diff --git a/consensus/tree_hash/tests/tests.rs b/consensus/tree_hash/tests/tests.rs deleted file mode 100644 index 8b2a4b21be8..00000000000 --- a/consensus/tree_hash/tests/tests.rs +++ /dev/null @@ -1,128 +0,0 @@ -use ssz_derive::Encode; -use tree_hash::{Hash256, MerkleHasher, PackedEncoding, TreeHash, BYTES_PER_CHUNK}; -use tree_hash_derive::TreeHash; - -#[derive(Encode)] -struct HashVec { - vec: Vec, -} - -impl From> for HashVec { - fn from(vec: Vec) -> Self { - Self { vec } - } -} - -impl tree_hash::TreeHash for HashVec { - fn tree_hash_type() -> tree_hash::TreeHashType { - tree_hash::TreeHashType::List - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - unreachable!("List should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("List should never be packed.") - } - - fn tree_hash_root(&self) -> Hash256 { - let mut hasher = - MerkleHasher::with_leaves((self.vec.len() + BYTES_PER_CHUNK - 1) / BYTES_PER_CHUNK); - - for item in &self.vec { - hasher.write(&item.tree_hash_packed_encoding()).unwrap() - } - - let root = hasher.finish().unwrap(); - - tree_hash::mix_in_length(&root, self.vec.len()) - } -} - -fn mix_in_selector(a: Hash256, selector: u8) -> Hash256 { - let mut b = [0; 32]; - b[0] = selector; - - Hash256::from_slice(ð2_hashing::hash32_concat(a.as_bytes(), &b)) -} - -fn u8_hash_concat(v1: u8, v2: u8) -> Hash256 { - let mut a = [0; 32]; - let mut b = [0; 32]; - - a[0] = v1; - b[0] = v2; - - Hash256::from_slice(ð2_hashing::hash32_concat(&a, &b)) -} - -fn u8_hash(x: u8) -> Hash256 { - let mut a = [0; 32]; - a[0] = x; - Hash256::from_slice(&a) -} - 
-#[derive(TreeHash)] -#[tree_hash(enum_behaviour = "transparent")] -enum FixedTrans { - A(u8), - B(u8), -} - -#[test] -fn fixed_trans() { - assert_eq!(FixedTrans::A(2).tree_hash_root(), u8_hash(2)); - assert_eq!(FixedTrans::B(2).tree_hash_root(), u8_hash(2)); -} - -#[derive(TreeHash)] -#[tree_hash(enum_behaviour = "union")] -enum FixedUnion { - A(u8), - B(u8), -} - -#[test] -fn fixed_union() { - assert_eq!(FixedUnion::A(2).tree_hash_root(), u8_hash_concat(2, 0)); - assert_eq!(FixedUnion::B(2).tree_hash_root(), u8_hash_concat(2, 1)); -} - -#[derive(TreeHash)] -#[tree_hash(enum_behaviour = "transparent")] -enum VariableTrans { - A(HashVec), - B(HashVec), -} - -#[test] -fn variable_trans() { - assert_eq!( - VariableTrans::A(HashVec::from(vec![2])).tree_hash_root(), - u8_hash_concat(2, 1) - ); - assert_eq!( - VariableTrans::B(HashVec::from(vec![2])).tree_hash_root(), - u8_hash_concat(2, 1) - ); -} - -#[derive(TreeHash)] -#[tree_hash(enum_behaviour = "union")] -enum VariableUnion { - A(HashVec), - B(HashVec), -} - -#[test] -fn variable_union() { - assert_eq!( - VariableUnion::A(HashVec::from(vec![2])).tree_hash_root(), - mix_in_selector(u8_hash_concat(2, 1), 0) - ); - assert_eq!( - VariableUnion::B(HashVec::from(vec![2])).tree_hash_root(), - mix_in_selector(u8_hash_concat(2, 1), 1) - ); -} diff --git a/consensus/tree_hash_derive/Cargo.toml b/consensus/tree_hash_derive/Cargo.toml deleted file mode 100644 index 5f3396eb163..00000000000 --- a/consensus/tree_hash_derive/Cargo.toml +++ /dev/null @@ -1,15 +0,0 @@ -[package] -name = "tree_hash_derive" -version = "0.4.0" -authors = ["Paul Hauner "] -edition = "2021" -description = "Procedural derive macros to accompany the tree_hash crate." 
-license = "Apache-2.0" - -[lib] -proc-macro = true - -[dependencies] -syn = "1.0.42" -quote = "1.0.7" -darling = "0.13.0" diff --git a/consensus/tree_hash_derive/src/lib.rs b/consensus/tree_hash_derive/src/lib.rs deleted file mode 100644 index 85ece80fb56..00000000000 --- a/consensus/tree_hash_derive/src/lib.rs +++ /dev/null @@ -1,336 +0,0 @@ -use darling::FromDeriveInput; -use proc_macro::TokenStream; -use quote::quote; -use std::convert::TryInto; -use syn::{parse_macro_input, Attribute, DataEnum, DataStruct, DeriveInput, Meta}; - -/// The highest possible union selector value (higher values are reserved for backwards compatible -/// extensions). -const MAX_UNION_SELECTOR: u8 = 127; - -#[derive(Debug, FromDeriveInput)] -#[darling(attributes(tree_hash))] -struct StructOpts { - #[darling(default)] - enum_behaviour: Option, -} - -const ENUM_TRANSPARENT: &str = "transparent"; -const ENUM_UNION: &str = "union"; -const ENUM_VARIANTS: &[&str] = &[ENUM_TRANSPARENT, ENUM_UNION]; -const NO_ENUM_BEHAVIOUR_ERROR: &str = "enums require an \"enum_behaviour\" attribute, \ - e.g., #[tree_hash(enum_behaviour = \"transparent\")]"; - -enum EnumBehaviour { - Transparent, - Union, -} - -impl EnumBehaviour { - pub fn new(s: Option) -> Option { - s.map(|s| match s.as_ref() { - ENUM_TRANSPARENT => EnumBehaviour::Transparent, - ENUM_UNION => EnumBehaviour::Union, - other => panic!( - "{} is an invalid enum_behaviour, use either {:?}", - other, ENUM_VARIANTS - ), - }) - } -} - -/// Return a Vec of `syn::Ident` for each named field in the struct, whilst filtering out fields -/// that should not be hashed. -/// -/// # Panics -/// Any unnamed struct field (like in a tuple struct) will raise a panic at compile time. 
-fn get_hashable_fields(struct_data: &syn::DataStruct) -> Vec<&syn::Ident> { - get_hashable_fields_and_their_caches(struct_data) - .into_iter() - .map(|(ident, _, _)| ident) - .collect() -} - -/// Return a Vec of the hashable fields of a struct, and each field's type and optional cache field. -fn get_hashable_fields_and_their_caches( - struct_data: &syn::DataStruct, -) -> Vec<(&syn::Ident, syn::Type, Option)> { - struct_data - .fields - .iter() - .filter_map(|f| { - if should_skip_hashing(f) { - None - } else { - let ident = f - .ident - .as_ref() - .expect("tree_hash_derive only supports named struct fields"); - let opt_cache_field = get_cache_field_for(f); - Some((ident, f.ty.clone(), opt_cache_field)) - } - }) - .collect() -} - -/// Parse the cached_tree_hash attribute for a field. -/// -/// Extract the cache field name from `#[cached_tree_hash(cache_field_name)]` -/// -/// Return `Some(cache_field_name)` if the field has a cached tree hash attribute, -/// or `None` otherwise. -fn get_cache_field_for(field: &syn::Field) -> Option { - use syn::{MetaList, NestedMeta}; - - let parsed_attrs = cached_tree_hash_attr_metas(&field.attrs); - if let [Meta::List(MetaList { nested, .. })] = &parsed_attrs[..] { - nested.iter().find_map(|x| match x { - NestedMeta::Meta(Meta::Path(path)) => path.get_ident().cloned(), - _ => None, - }) - } else { - None - } -} - -/// Process the `cached_tree_hash` attributes from a list of attributes into structured `Meta`s. -fn cached_tree_hash_attr_metas(attrs: &[Attribute]) -> Vec { - attrs - .iter() - .filter(|attr| attr.path.is_ident("cached_tree_hash")) - .flat_map(|attr| attr.parse_meta()) - .collect() -} - -/// Returns true if some field has an attribute declaring it should not be hashed. 
-/// -/// The field attribute is: `#[tree_hash(skip_hashing)]` -fn should_skip_hashing(field: &syn::Field) -> bool { - field.attrs.iter().any(|attr| { - attr.path.is_ident("tree_hash") - && attr.tokens.to_string().replace(' ', "") == "(skip_hashing)" - }) -} - -/// Implements `tree_hash::TreeHash` for some `struct`. -/// -/// Fields are hashed in the order they are defined. -#[proc_macro_derive(TreeHash, attributes(tree_hash))] -pub fn tree_hash_derive(input: TokenStream) -> TokenStream { - let item = parse_macro_input!(input as DeriveInput); - let opts = StructOpts::from_derive_input(&item).unwrap(); - let enum_opt = EnumBehaviour::new(opts.enum_behaviour); - - match &item.data { - syn::Data::Struct(s) => { - if enum_opt.is_some() { - panic!("enum_behaviour is invalid for structs"); - } - tree_hash_derive_struct(&item, s) - } - syn::Data::Enum(s) => match enum_opt.expect(NO_ENUM_BEHAVIOUR_ERROR) { - EnumBehaviour::Transparent => tree_hash_derive_enum_transparent(&item, s), - EnumBehaviour::Union => tree_hash_derive_enum_union(&item, s), - }, - _ => panic!("tree_hash_derive only supports structs and enums."), - } -} - -fn tree_hash_derive_struct(item: &DeriveInput, struct_data: &DataStruct) -> TokenStream { - let name = &item.ident; - let (impl_generics, ty_generics, where_clause) = &item.generics.split_for_impl(); - - let idents = get_hashable_fields(struct_data); - let num_leaves = idents.len(); - - let output = quote! 
{ - impl #impl_generics tree_hash::TreeHash for #name #ty_generics #where_clause { - fn tree_hash_type() -> tree_hash::TreeHashType { - tree_hash::TreeHashType::Container - } - - fn tree_hash_packed_encoding(&self) -> tree_hash::PackedEncoding { - unreachable!("Struct should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("Struct should never be packed.") - } - - fn tree_hash_root(&self) -> tree_hash::Hash256 { - let mut hasher = tree_hash::MerkleHasher::with_leaves(#num_leaves); - - #( - hasher.write(self.#idents.tree_hash_root().as_bytes()) - .expect("tree hash derive should not apply too many leaves"); - )* - - hasher.finish().expect("tree hash derive should not have a remaining buffer") - } - } - }; - output.into() -} - -/// Derive `TreeHash` for an enum in the "transparent" method. -/// -/// The "transparent" method is distinct from the "union" method specified in the SSZ specification. -/// When using "transparent", the enum will be ignored and the contained field will be hashed as if -/// the enum does not exist. -/// -///## Limitations -/// -/// Only supports: -/// - Enums with a single field per variant, where -/// - All fields are "container" types. -/// -/// ## Panics -/// -/// Will panic at compile-time if the single field requirement isn't met, but will panic *at run -/// time* if the container type requirement isn't met. -fn tree_hash_derive_enum_transparent( - derive_input: &DeriveInput, - enum_data: &DataEnum, -) -> TokenStream { - let name = &derive_input.ident; - let (impl_generics, ty_generics, where_clause) = &derive_input.generics.split_for_impl(); - - let (patterns, type_exprs): (Vec<_>, Vec<_>) = enum_data - .variants - .iter() - .map(|variant| { - let variant_name = &variant.ident; - - if variant.fields.len() != 1 { - panic!("TreeHash can only be derived for enums with 1 field per variant"); - } - - let pattern = quote! 
{ - #name::#variant_name(ref inner) - }; - - let ty = &(&variant.fields).into_iter().next().unwrap().ty; - let type_expr = quote! { - <#ty as tree_hash::TreeHash>::tree_hash_type() - }; - (pattern, type_expr) - }) - .unzip(); - - let output = quote! { - impl #impl_generics tree_hash::TreeHash for #name #ty_generics #where_clause { - fn tree_hash_type() -> tree_hash::TreeHashType { - #( - assert_eq!( - #type_exprs, - tree_hash::TreeHashType::Container, - "all variants must be of container type" - ); - )* - tree_hash::TreeHashType::Container - } - - fn tree_hash_packed_encoding(&self) -> tree_hash::PackedEncoding { - unreachable!("Enum should never be packed") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("Enum should never be packed") - } - - fn tree_hash_root(&self) -> tree_hash::Hash256 { - match self { - #( - #patterns => inner.tree_hash_root(), - )* - } - } - } - }; - output.into() -} - -/// Derive `TreeHash` for an `enum` following the "union" SSZ spec. -/// -/// The union selector will be determined based upon the order in which the enum variants are -/// defined. E.g., the top-most variant in the enum will have a selector of `0`, the variant -/// beneath it will have a selector of `1` and so on. -/// -/// # Limitations -/// -/// Only supports enums where each variant has a single field. -fn tree_hash_derive_enum_union(derive_input: &DeriveInput, enum_data: &DataEnum) -> TokenStream { - let name = &derive_input.ident; - let (impl_generics, ty_generics, where_clause) = &derive_input.generics.split_for_impl(); - - let patterns: Vec<_> = enum_data - .variants - .iter() - .map(|variant| { - let variant_name = &variant.ident; - - if variant.fields.len() != 1 { - panic!("TreeHash can only be derived for enums with 1 field per variant"); - } - - quote! { - #name::#variant_name(ref inner) - } - }) - .collect(); - - let union_selectors = compute_union_selectors(patterns.len()); - - let output = quote! 
{ - impl #impl_generics tree_hash::TreeHash for #name #ty_generics #where_clause { - fn tree_hash_type() -> tree_hash::TreeHashType { - tree_hash::TreeHashType::Container - } - - fn tree_hash_packed_encoding(&self) -> tree_hash::PackedEncoding { - unreachable!("Enum should never be packed") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("Enum should never be packed") - } - - fn tree_hash_root(&self) -> tree_hash::Hash256 { - match self { - #( - #patterns => { - let root = inner.tree_hash_root(); - let selector = #union_selectors; - tree_hash::mix_in_selector(&root, selector) - .expect("derive macro should prevent out-of-bounds selectors") - }, - )* - } - } - } - }; - output.into() -} - -fn compute_union_selectors(num_variants: usize) -> Vec { - let union_selectors = (0..num_variants) - .map(|i| { - i.try_into() - .expect("union selector exceeds u8::max_value, union has too many variants") - }) - .collect::>(); - - let highest_selector = union_selectors - .last() - .copied() - .expect("0-variant union is not permitted"); - - assert!( - highest_selector <= MAX_UNION_SELECTOR, - "union selector {} exceeds limit of {}, enum has too many variants", - highest_selector, - MAX_UNION_SELECTOR - ); - - union_selectors -} diff --git a/consensus/types/src/aggregate_and_proof.rs b/consensus/types/src/aggregate_and_proof.rs index 39a0a28c0ce..20d66cd4471 100644 --- a/consensus/types/src/aggregate_and_proof.rs +++ b/consensus/types/src/aggregate_and_proof.rs @@ -27,7 +27,7 @@ use tree_hash_derive::TreeHash; #[arbitrary(bound = "T: EthSpec")] pub struct AggregateAndProof { /// The index of the validator that created the attestation. - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub aggregator_index: u64, /// The aggregate attestation. 
pub aggregate: Attestation, diff --git a/consensus/types/src/attestation_data.rs b/consensus/types/src/attestation_data.rs index c6a661c85dd..286502b4497 100644 --- a/consensus/types/src/attestation_data.rs +++ b/consensus/types/src/attestation_data.rs @@ -27,7 +27,7 @@ use tree_hash_derive::TreeHash; )] pub struct AttestationData { pub slot: Slot, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub index: u64, // LMD GHOST vote diff --git a/consensus/types/src/attestation_duty.rs b/consensus/types/src/attestation_duty.rs index 87a9c932a45..93a4c147b67 100644 --- a/consensus/types/src/attestation_duty.rs +++ b/consensus/types/src/attestation_duty.rs @@ -12,6 +12,6 @@ pub struct AttestationDuty { /// The total number of attesters in the committee. pub committee_len: usize, /// The committee count at `attestation_slot`. - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub committees_at_slot: u64, } diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index 0f26cd0e5e7..9194bf46c0d 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -58,7 +58,7 @@ pub struct BeaconBlock = FullPayload #[superstruct(getter(copy))] pub slot: Slot, #[superstruct(getter(copy))] - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub proposer_index: u64, #[superstruct(getter(copy))] pub parent_root: Hash256, diff --git a/consensus/types/src/beacon_block_header.rs b/consensus/types/src/beacon_block_header.rs index c6d6678f31a..f2ef0a3dccd 100644 --- a/consensus/types/src/beacon_block_header.rs +++ b/consensus/types/src/beacon_block_header.rs @@ -26,7 +26,7 @@ use tree_hash_derive::TreeHash; )] pub struct BeaconBlockHeader { pub slot: Slot, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub proposer_index: u64, pub parent_root: Hash256, 
pub state_root: Hash256, diff --git a/consensus/types/src/bls_to_execution_change.rs b/consensus/types/src/bls_to_execution_change.rs index b279515bd1f..3ed9ee9255e 100644 --- a/consensus/types/src/bls_to_execution_change.rs +++ b/consensus/types/src/bls_to_execution_change.rs @@ -21,7 +21,7 @@ use tree_hash_derive::TreeHash; TestRandom, )] pub struct BlsToExecutionChange { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, pub from_bls_pubkey: PublicKeyBytes, pub to_execution_address: Address, diff --git a/consensus/types/src/builder_bid.rs b/consensus/types/src/builder_bid.rs index e922e81c706..8723c2afed9 100644 --- a/consensus/types/src/builder_bid.rs +++ b/consensus/types/src/builder_bid.rs @@ -16,7 +16,7 @@ use tree_hash_derive::TreeHash; pub struct BuilderBid> { #[serde_as(as = "BlindedPayloadAsHeader")] pub header: Payload, - #[serde(with = "eth2_serde_utils::quoted_u256")] + #[serde(with = "serde_utils::quoted_u256")] pub value: Uint256, pub pubkey: PublicKeyBytes, #[serde(skip)] @@ -50,7 +50,7 @@ impl> ForkVersionDeserialize #[derive(Deserialize)] struct Helper { header: serde_json::Value, - #[serde(with = "eth2_serde_utils::quoted_u256")] + #[serde(with = "serde_utils::quoted_u256")] value: Uint256, pubkey: PublicKeyBytes, } diff --git a/consensus/types/src/contribution_and_proof.rs b/consensus/types/src/contribution_and_proof.rs index 167b0857c5a..7e757f89b1a 100644 --- a/consensus/types/src/contribution_and_proof.rs +++ b/consensus/types/src/contribution_and_proof.rs @@ -25,7 +25,7 @@ use tree_hash_derive::TreeHash; #[arbitrary(bound = "T: EthSpec")] pub struct ContributionAndProof { /// The index of the validator that created the sync contribution. - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub aggregator_index: u64, /// The aggregate contribution. 
pub contribution: SyncCommitteeContribution, diff --git a/consensus/types/src/deposit_data.rs b/consensus/types/src/deposit_data.rs index 1969311671f..d75643f6597 100644 --- a/consensus/types/src/deposit_data.rs +++ b/consensus/types/src/deposit_data.rs @@ -26,7 +26,7 @@ use tree_hash_derive::TreeHash; pub struct DepositData { pub pubkey: PublicKeyBytes, pub withdrawal_credentials: Hash256, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub amount: u64, pub signature: SignatureBytes, } diff --git a/consensus/types/src/deposit_message.rs b/consensus/types/src/deposit_message.rs index 63073401c22..1096cfaa283 100644 --- a/consensus/types/src/deposit_message.rs +++ b/consensus/types/src/deposit_message.rs @@ -25,7 +25,7 @@ use tree_hash_derive::TreeHash; pub struct DepositMessage { pub pubkey: PublicKeyBytes, pub withdrawal_credentials: Hash256, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub amount: u64, } diff --git a/consensus/types/src/deposit_tree_snapshot.rs b/consensus/types/src/deposit_tree_snapshot.rs index 21bbab81fff..aea4677f265 100644 --- a/consensus/types/src/deposit_tree_snapshot.rs +++ b/consensus/types/src/deposit_tree_snapshot.rs @@ -1,5 +1,5 @@ use crate::*; -use eth2_hashing::{hash32_concat, ZERO_HASHES}; +use ethereum_hashing::{hash32_concat, ZERO_HASHES}; use int_to_bytes::int_to_bytes32; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; diff --git a/consensus/types/src/enr_fork_id.rs b/consensus/types/src/enr_fork_id.rs index 3556e31a9fc..409383c9048 100644 --- a/consensus/types/src/enr_fork_id.rs +++ b/consensus/types/src/enr_fork_id.rs @@ -24,9 +24,9 @@ use tree_hash_derive::TreeHash; TestRandom, )] pub struct EnrForkId { - #[serde(with = "eth2_serde_utils::bytes_4_hex")] + #[serde(with = "serde_utils::bytes_4_hex")] pub fork_digest: [u8; 4], - #[serde(with = "eth2_serde_utils::bytes_4_hex")] + #[serde(with = 
"serde_utils::bytes_4_hex")] pub next_fork_version: [u8; 4], pub next_fork_epoch: Epoch, } diff --git a/consensus/types/src/eth1_data.rs b/consensus/types/src/eth1_data.rs index 6b2396e112c..d8f476b99b5 100644 --- a/consensus/types/src/eth1_data.rs +++ b/consensus/types/src/eth1_data.rs @@ -26,7 +26,7 @@ use tree_hash_derive::TreeHash; )] pub struct Eth1Data { pub deposit_root: Hash256, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub deposit_count: u64, pub block_hash: Hash256, } diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 7762afe9184..1ee5a388b35 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -60,21 +60,21 @@ pub struct ExecutionPayload { pub logs_bloom: FixedVector, #[superstruct(getter(copy))] pub prev_randao: Hash256, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] #[superstruct(getter(copy))] pub block_number: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] #[superstruct(getter(copy))] pub gas_limit: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] #[superstruct(getter(copy))] pub gas_used: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] #[superstruct(getter(copy))] pub timestamp: u64, #[serde(with = "ssz_types::serde_utils::hex_var_list")] pub extra_data: VariableList, - #[serde(with = "eth2_serde_utils::quoted_u256")] + #[serde(with = "serde_utils::quoted_u256")] #[superstruct(getter(copy))] pub base_fee_per_gas: Uint256, #[superstruct(only(Eip4844))] diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index 4dc79ddc999..cee67b2431f 100644 --- a/consensus/types/src/execution_payload_header.rs +++ 
b/consensus/types/src/execution_payload_header.rs @@ -53,21 +53,21 @@ pub struct ExecutionPayloadHeader { pub logs_bloom: FixedVector, #[superstruct(getter(copy))] pub prev_randao: Hash256, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] #[superstruct(getter(copy))] pub block_number: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] #[superstruct(getter(copy))] pub gas_limit: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] #[superstruct(getter(copy))] pub gas_used: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] #[superstruct(getter(copy))] pub timestamp: u64, #[serde(with = "ssz_types::serde_utils::hex_var_list")] pub extra_data: VariableList, - #[serde(with = "eth2_serde_utils::quoted_u256")] + #[serde(with = "serde_utils::quoted_u256")] #[superstruct(getter(copy))] pub base_fee_per_gas: Uint256, #[superstruct(only(Eip4844))] diff --git a/consensus/types/src/fork.rs b/consensus/types/src/fork.rs index de332f0cada..4650881f72d 100644 --- a/consensus/types/src/fork.rs +++ b/consensus/types/src/fork.rs @@ -24,9 +24,9 @@ use tree_hash_derive::TreeHash; TestRandom, )] pub struct Fork { - #[serde(with = "eth2_serde_utils::bytes_4_hex")] + #[serde(with = "serde_utils::bytes_4_hex")] pub previous_version: [u8; 4], - #[serde(with = "eth2_serde_utils::bytes_4_hex")] + #[serde(with = "serde_utils::bytes_4_hex")] pub current_version: [u8; 4], pub epoch: Epoch, } diff --git a/consensus/types/src/fork_data.rs b/consensus/types/src/fork_data.rs index cc790393159..bf9c48cd7eb 100644 --- a/consensus/types/src/fork_data.rs +++ b/consensus/types/src/fork_data.rs @@ -23,7 +23,7 @@ use tree_hash_derive::TreeHash; TestRandom, )] pub struct ForkData { - #[serde(with = "eth2_serde_utils::bytes_4_hex")] + #[serde(with = "serde_utils::bytes_4_hex")] pub current_version: [u8; 4], pub 
genesis_validators_root: Hash256, } diff --git a/consensus/types/src/graffiti.rs b/consensus/types/src/graffiti.rs index 6288cdbe807..bd4abe37d8f 100644 --- a/consensus/types/src/graffiti.rs +++ b/consensus/types/src/graffiti.rs @@ -27,7 +27,7 @@ impl Graffiti { impl fmt::Display for Graffiti { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", eth2_serde_utils::hex::encode(self.0)) + write!(f, "{}", serde_utils::hex::encode(self.0)) } } @@ -96,7 +96,7 @@ pub mod serde_graffiti { where S: Serializer, { - serializer.serialize_str(ð2_serde_utils::hex::encode(bytes)) + serializer.serialize_str(&serde_utils::hex::encode(bytes)) } pub fn deserialize<'de, D>(deserializer: D) -> Result<[u8; GRAFFITI_BYTES_LEN], D::Error> @@ -105,7 +105,7 @@ pub mod serde_graffiti { { let s: String = Deserialize::deserialize(deserializer)?; - let bytes = eth2_serde_utils::hex::decode(&s).map_err(D::Error::custom)?; + let bytes = serde_utils::hex::decode(&s).map_err(D::Error::custom)?; if bytes.len() != GRAFFITI_BYTES_LEN { return Err(D::Error::custom(format!( diff --git a/consensus/types/src/indexed_attestation.rs b/consensus/types/src/indexed_attestation.rs index 16ffb1ad8fa..c59cbef307e 100644 --- a/consensus/types/src/indexed_attestation.rs +++ b/consensus/types/src/indexed_attestation.rs @@ -72,9 +72,9 @@ impl Hash for IndexedAttestation { mod quoted_variable_list_u64 { use super::*; use crate::Unsigned; - use eth2_serde_utils::quoted_u64_vec::{QuotedIntVecVisitor, QuotedIntWrapper}; use serde::ser::SerializeSeq; use serde::{Deserializer, Serializer}; + use serde_utils::quoted_u64_vec::{QuotedIntVecVisitor, QuotedIntWrapper}; pub fn serialize(value: &VariableList, serializer: S) -> Result where diff --git a/consensus/types/src/participation_flags.rs b/consensus/types/src/participation_flags.rs index bd98f8da078..4f170a60be8 100644 --- a/consensus/types/src/participation_flags.rs +++ b/consensus/types/src/participation_flags.rs @@ -9,7 +9,7 @@ use 
tree_hash::{PackedEncoding, TreeHash, TreeHashType}; #[serde(transparent)] #[derive(arbitrary::Arbitrary)] pub struct ParticipationFlags { - #[serde(with = "eth2_serde_utils::quoted_u8")] + #[serde(with = "serde_utils::quoted_u8")] bits: u8, } diff --git a/consensus/types/src/pending_attestation.rs b/consensus/types/src/pending_attestation.rs index 1b9903ebbe5..88db0ec4d33 100644 --- a/consensus/types/src/pending_attestation.rs +++ b/consensus/types/src/pending_attestation.rs @@ -25,9 +25,9 @@ use tree_hash_derive::TreeHash; pub struct PendingAttestation { pub aggregation_bits: BitList, pub data: AttestationData, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub inclusion_delay: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub proposer_index: u64, } diff --git a/consensus/types/src/preset.rs b/consensus/types/src/preset.rs index 20c78f05159..e65dd8f60de 100644 --- a/consensus/types/src/preset.rs +++ b/consensus/types/src/preset.rs @@ -12,71 +12,71 @@ use serde_derive::{Deserialize, Serialize}; #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(rename_all = "UPPERCASE")] pub struct BasePreset { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_committees_per_slot: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub target_committee_size: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_validators_per_committee: u64, - #[serde(with = "eth2_serde_utils::quoted_u8")] + #[serde(with = "serde_utils::quoted_u8")] pub shuffle_round_count: u8, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub hysteresis_quotient: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub hysteresis_downward_multiplier: 
u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub hysteresis_upward_multiplier: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub safe_slots_to_update_justified: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub min_deposit_amount: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_effective_balance: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub effective_balance_increment: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub min_attestation_inclusion_delay: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub slots_per_epoch: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub min_seed_lookahead: Epoch, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_seed_lookahead: Epoch, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub epochs_per_eth1_voting_period: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub slots_per_historical_root: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub min_epochs_to_inactivity_penalty: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub epochs_per_historical_vector: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub epochs_per_slashings_vector: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub historical_roots_limit: u64, - #[serde(with = 
"eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_registry_limit: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub base_reward_factor: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub whistleblower_reward_quotient: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub proposer_reward_quotient: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub inactivity_penalty_quotient: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub min_slashing_penalty_quotient: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub proportional_slashing_multiplier: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_proposer_slashings: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_attester_slashings: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_attestations: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_deposits: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_voluntary_exits: u64, } @@ -123,17 +123,17 @@ impl BasePreset { #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(rename_all = "UPPERCASE")] pub struct AltairPreset { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub inactivity_penalty_quotient_altair: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub min_slashing_penalty_quotient_altair: u64, - #[serde(with = 
"eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub proportional_slashing_multiplier_altair: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub sync_committee_size: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub epochs_per_sync_committee_period: Epoch, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub min_sync_committee_participants: u64, } @@ -153,19 +153,19 @@ impl AltairPreset { #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(rename_all = "UPPERCASE")] pub struct BellatrixPreset { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub inactivity_penalty_quotient_bellatrix: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub min_slashing_penalty_quotient_bellatrix: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub proportional_slashing_multiplier_bellatrix: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_bytes_per_transaction: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_transactions_per_payload: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub bytes_per_logs_bloom: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_extra_data_bytes: u64, } @@ -187,11 +187,11 @@ impl BellatrixPreset { #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(rename_all = "UPPERCASE")] pub struct CapellaPreset { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_bls_to_execution_changes: u64, - #[serde(with = 
"eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_withdrawals_per_payload: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_validators_per_withdrawals_sweep: u64, } diff --git a/consensus/types/src/proposer_preparation_data.rs b/consensus/types/src/proposer_preparation_data.rs index 6179828a950..2828b0d4d55 100644 --- a/consensus/types/src/proposer_preparation_data.rs +++ b/consensus/types/src/proposer_preparation_data.rs @@ -6,7 +6,7 @@ use serde::{Deserialize, Serialize}; #[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] pub struct ProposerPreparationData { /// The validators index. - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, /// The fee-recipient address. pub fee_recipient: Address, diff --git a/consensus/types/src/selection_proof.rs b/consensus/types/src/selection_proof.rs index f8bc8ba69fb..2a404b3b963 100644 --- a/consensus/types/src/selection_proof.rs +++ b/consensus/types/src/selection_proof.rs @@ -1,7 +1,7 @@ use crate::{ ChainSpec, Domain, EthSpec, Fork, Hash256, PublicKey, SecretKey, Signature, SignedRoot, Slot, }; -use eth2_hashing::hash; +use ethereum_hashing::hash; use safe_arith::{ArithError, SafeArith}; use ssz::Encode; use std::cmp; diff --git a/consensus/types/src/slot_epoch.rs b/consensus/types/src/slot_epoch.rs index 06f99b98888..991261d16ad 100644 --- a/consensus/types/src/slot_epoch.rs +++ b/consensus/types/src/slot_epoch.rs @@ -38,7 +38,7 @@ use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Rem, Sub, SubAssi Deserialize, )] #[serde(transparent)] -pub struct Slot(#[serde(with = "eth2_serde_utils::quoted_u64")] u64); +pub struct Slot(#[serde(with = "serde_utils::quoted_u64")] u64); #[derive( arbitrary::Arbitrary, @@ -54,7 +54,7 @@ pub struct Slot(#[serde(with = "eth2_serde_utils::quoted_u64")] u64); Deserialize, )] #[serde(transparent)] -pub struct 
Epoch(#[serde(with = "eth2_serde_utils::quoted_u64")] u64); +pub struct Epoch(#[serde(with = "serde_utils::quoted_u64")] u64); impl_common!(Slot); impl_common!(Epoch); diff --git a/consensus/types/src/subnet_id.rs b/consensus/types/src/subnet_id.rs index fd06eb78a12..b885f89f7d4 100644 --- a/consensus/types/src/subnet_id.rs +++ b/consensus/types/src/subnet_id.rs @@ -20,7 +20,7 @@ lazy_static! { #[derive(arbitrary::Arbitrary, Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] #[serde(transparent)] -pub struct SubnetId(#[serde(with = "eth2_serde_utils::quoted_u64")] u64); +pub struct SubnetId(#[serde(with = "serde_utils::quoted_u64")] u64); pub fn subnet_id_to_string(i: u64) -> &'static str { if i < MAX_SUBNET_ID as u64 { @@ -85,7 +85,7 @@ impl SubnetId { let subscription_event_idx = epoch.as_u64() / spec.epochs_per_subnet_subscription; let permutation_seed = - eth2_hashing::hash(&int_to_bytes::int_to_bytes8(subscription_event_idx)); + ethereum_hashing::hash(&int_to_bytes::int_to_bytes8(subscription_event_idx)); let num_subnets = 1 << spec.attestation_subnet_prefix_bits(); diff --git a/consensus/types/src/sync_aggregator_selection_data.rs b/consensus/types/src/sync_aggregator_selection_data.rs index 9e72438be20..b101068123b 100644 --- a/consensus/types/src/sync_aggregator_selection_data.rs +++ b/consensus/types/src/sync_aggregator_selection_data.rs @@ -21,7 +21,7 @@ use tree_hash_derive::TreeHash; )] pub struct SyncAggregatorSelectionData { pub slot: Slot, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub subcommittee_index: u64, } diff --git a/consensus/types/src/sync_committee_contribution.rs b/consensus/types/src/sync_committee_contribution.rs index ef8b52becfc..425f8f116d4 100644 --- a/consensus/types/src/sync_committee_contribution.rs +++ b/consensus/types/src/sync_committee_contribution.rs @@ -32,7 +32,7 @@ pub enum Error { pub struct SyncCommitteeContribution { pub slot: Slot, pub 
beacon_block_root: Hash256, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub subcommittee_index: u64, pub aggregation_bits: BitVector, pub signature: AggregateSignature, diff --git a/consensus/types/src/sync_committee_message.rs b/consensus/types/src/sync_committee_message.rs index 5c2fb083743..d0301cdf638 100644 --- a/consensus/types/src/sync_committee_message.rs +++ b/consensus/types/src/sync_committee_message.rs @@ -23,7 +23,7 @@ use tree_hash_derive::TreeHash; pub struct SyncCommitteeMessage { pub slot: Slot, pub beacon_block_root: Hash256, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, // Signature by the validator over `beacon_block_root`. pub signature: Signature, diff --git a/consensus/types/src/sync_committee_subscription.rs b/consensus/types/src/sync_committee_subscription.rs index 7f5ed063f62..8e040279d73 100644 --- a/consensus/types/src/sync_committee_subscription.rs +++ b/consensus/types/src/sync_committee_subscription.rs @@ -7,10 +7,10 @@ use ssz_derive::{Decode, Encode}; #[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)] pub struct SyncCommitteeSubscription { /// The validators index. - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, /// The sync committee indices. - #[serde(with = "eth2_serde_utils::quoted_u64_vec")] + #[serde(with = "serde_utils::quoted_u64_vec")] pub sync_committee_indices: Vec, /// Epoch until which this subscription is required. 
pub until_epoch: Epoch, diff --git a/consensus/types/src/sync_duty.rs b/consensus/types/src/sync_duty.rs index bdb07845968..e3ffe62bfd1 100644 --- a/consensus/types/src/sync_duty.rs +++ b/consensus/types/src/sync_duty.rs @@ -7,9 +7,9 @@ use std::collections::HashSet; #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct SyncDuty { pub pubkey: PublicKeyBytes, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, - #[serde(with = "eth2_serde_utils::quoted_u64_vec")] + #[serde(with = "serde_utils::quoted_u64_vec")] pub validator_sync_committee_indices: Vec, } diff --git a/consensus/types/src/sync_selection_proof.rs b/consensus/types/src/sync_selection_proof.rs index 570abace1eb..7cae3946c6b 100644 --- a/consensus/types/src/sync_selection_proof.rs +++ b/consensus/types/src/sync_selection_proof.rs @@ -5,7 +5,7 @@ use crate::{ ChainSpec, Domain, EthSpec, Fork, Hash256, PublicKey, SecretKey, Signature, SignedRoot, Slot, SyncAggregatorSelectionData, }; -use eth2_hashing::hash; +use ethereum_hashing::hash; use safe_arith::{ArithError, SafeArith}; use ssz::Encode; use ssz_types::typenum::Unsigned; diff --git a/consensus/types/src/sync_subnet_id.rs b/consensus/types/src/sync_subnet_id.rs index 11bcf268941..5af756ae013 100644 --- a/consensus/types/src/sync_subnet_id.rs +++ b/consensus/types/src/sync_subnet_id.rs @@ -21,7 +21,7 @@ lazy_static! 
{ #[derive(arbitrary::Arbitrary, Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] #[serde(transparent)] -pub struct SyncSubnetId(#[serde(with = "eth2_serde_utils::quoted_u64")] u64); +pub struct SyncSubnetId(#[serde(with = "serde_utils::quoted_u64")] u64); pub fn sync_subnet_id_to_string(i: u64) -> &'static str { if i < SYNC_COMMITTEE_SUBNET_COUNT { diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator.rs index 43b892cdf3d..6860397fb5b 100644 --- a/consensus/types/src/validator.rs +++ b/consensus/types/src/validator.rs @@ -25,7 +25,7 @@ use tree_hash_derive::TreeHash; pub struct Validator { pub pubkey: PublicKeyBytes, pub withdrawal_credentials: Hash256, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub effective_balance: u64, pub slashed: bool, pub activation_eligibility_epoch: Epoch, diff --git a/consensus/types/src/validator_registration_data.rs b/consensus/types/src/validator_registration_data.rs index 5a3450df081..de7f26cc632 100644 --- a/consensus/types/src/validator_registration_data.rs +++ b/consensus/types/src/validator_registration_data.rs @@ -13,9 +13,9 @@ pub struct SignedValidatorRegistrationData { #[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode, TreeHash)] pub struct ValidatorRegistrationData { pub fee_recipient: Address, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub gas_limit: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub timestamp: u64, pub pubkey: PublicKeyBytes, } diff --git a/consensus/types/src/voluntary_exit.rs b/consensus/types/src/voluntary_exit.rs index 20c84986c29..02686fef9ad 100644 --- a/consensus/types/src/voluntary_exit.rs +++ b/consensus/types/src/voluntary_exit.rs @@ -27,7 +27,7 @@ use tree_hash_derive::TreeHash; pub struct VoluntaryExit { /// Earliest epoch when voluntary exit can be processed. 
pub epoch: Epoch, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, } diff --git a/consensus/types/src/withdrawal.rs b/consensus/types/src/withdrawal.rs index 5221ff63f09..eed7c7e277f 100644 --- a/consensus/types/src/withdrawal.rs +++ b/consensus/types/src/withdrawal.rs @@ -20,12 +20,12 @@ use tree_hash_derive::TreeHash; TestRandom, )] pub struct Withdrawal { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub index: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, pub address: Address, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub amount: u64, } diff --git a/crypto/bls/Cargo.toml b/crypto/bls/Cargo.toml index c3331824d9e..a610f257cdb 100644 --- a/crypto/bls/Cargo.toml +++ b/crypto/bls/Cargo.toml @@ -5,15 +5,15 @@ authors = ["Paul Hauner "] edition = "2021" [dependencies] -eth2_ssz = "0.4.1" -tree_hash = "0.4.1" +ethereum_ssz = "0.5.0" +tree_hash = "0.5.0" milagro_bls = { git = "https://github.com/sigp/milagro_bls", tag = "v1.4.2", optional = true } rand = "0.7.3" serde = "1.0.116" serde_derive = "1.0.116" -eth2_serde_utils = "0.1.1" +ethereum_serde_utils = "0.5.0" hex = "0.4.2" -eth2_hashing = "0.3.0" +ethereum_hashing = "1.0.0-beta.2" ethereum-types = "0.14.1" arbitrary = { version = "1.0", features = ["derive"], optional = true } zeroize = { version = "1.4.2", features = ["zeroize_derive"] } diff --git a/crypto/bls/src/generic_aggregate_signature.rs b/crypto/bls/src/generic_aggregate_signature.rs index a61529af250..e6e53253f64 100644 --- a/crypto/bls/src/generic_aggregate_signature.rs +++ b/crypto/bls/src/generic_aggregate_signature.rs @@ -4,9 +4,9 @@ use crate::{ generic_signature::{GenericSignature, TSignature}, Error, Hash256, INFINITY_SIGNATURE, SIGNATURE_BYTES_LEN, }; -use eth2_serde_utils::hex::encode as 
hex_encode; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; +use serde_utils::hex::encode as hex_encode; use ssz::{Decode, Encode}; use std::fmt; use std::hash::{Hash, Hasher}; diff --git a/crypto/bls/src/generic_public_key.rs b/crypto/bls/src/generic_public_key.rs index 847d039c62b..462e4cb2cb0 100644 --- a/crypto/bls/src/generic_public_key.rs +++ b/crypto/bls/src/generic_public_key.rs @@ -1,8 +1,8 @@ use crate::generic_public_key_bytes::GenericPublicKeyBytes; use crate::Error; -use eth2_serde_utils::hex::encode as hex_encode; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; +use serde_utils::hex::encode as hex_encode; use ssz::{Decode, Encode}; use std::fmt; use std::hash::{Hash, Hasher}; diff --git a/crypto/bls/src/generic_public_key_bytes.rs b/crypto/bls/src/generic_public_key_bytes.rs index c2f318ab65d..59b0ffc43f1 100644 --- a/crypto/bls/src/generic_public_key_bytes.rs +++ b/crypto/bls/src/generic_public_key_bytes.rs @@ -2,9 +2,9 @@ use crate::{ generic_public_key::{GenericPublicKey, TPublicKey}, Error, PUBLIC_KEY_BYTES_LEN, }; -use eth2_serde_utils::hex::encode as hex_encode; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; +use serde_utils::hex::encode as hex_encode; use ssz::{Decode, Encode}; use std::convert::TryInto; use std::fmt; diff --git a/crypto/bls/src/generic_signature.rs b/crypto/bls/src/generic_signature.rs index 01e5ed1d481..05e0a222bd5 100644 --- a/crypto/bls/src/generic_signature.rs +++ b/crypto/bls/src/generic_signature.rs @@ -2,9 +2,9 @@ use crate::{ generic_public_key::{GenericPublicKey, TPublicKey}, Error, Hash256, }; -use eth2_serde_utils::hex::encode as hex_encode; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; +use serde_utils::hex::encode as hex_encode; use ssz::{Decode, Encode}; use std::fmt; use std::hash::{Hash, Hasher}; diff --git a/crypto/bls/src/generic_signature_bytes.rs 
b/crypto/bls/src/generic_signature_bytes.rs index aa33c90d0c3..8f9f2a4d88e 100644 --- a/crypto/bls/src/generic_signature_bytes.rs +++ b/crypto/bls/src/generic_signature_bytes.rs @@ -3,9 +3,9 @@ use crate::{ generic_signature::{GenericSignature, TSignature}, Error, INFINITY_SIGNATURE, SIGNATURE_BYTES_LEN, }; -use eth2_serde_utils::hex::encode as hex_encode; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; +use serde_utils::hex::encode as hex_encode; use ssz::{Decode, Encode}; use std::convert::TryInto; use std::fmt; diff --git a/crypto/bls/src/get_withdrawal_credentials.rs b/crypto/bls/src/get_withdrawal_credentials.rs index 98106434f14..d5e6470504a 100644 --- a/crypto/bls/src/get_withdrawal_credentials.rs +++ b/crypto/bls/src/get_withdrawal_credentials.rs @@ -1,5 +1,5 @@ use crate::PublicKey; -use eth2_hashing::hash; +use ethereum_hashing::hash; use ssz::Encode; /// Returns the withdrawal credentials for a given public key. diff --git a/crypto/eth2_hashing/.cargo/config b/crypto/eth2_hashing/.cargo/config deleted file mode 100644 index 4ec2f3b8620..00000000000 --- a/crypto/eth2_hashing/.cargo/config +++ /dev/null @@ -1,2 +0,0 @@ -[target.wasm32-unknown-unknown] -runner = 'wasm-bindgen-test-runner' diff --git a/crypto/eth2_hashing/Cargo.toml b/crypto/eth2_hashing/Cargo.toml deleted file mode 100644 index db296c70fe9..00000000000 --- a/crypto/eth2_hashing/Cargo.toml +++ /dev/null @@ -1,24 +0,0 @@ -[package] -name = "eth2_hashing" -version = "0.3.0" -authors = ["Paul Hauner "] -edition = "2021" -license = "Apache-2.0" -description = "Hashing primitives used in Ethereum 2.0" - -[dependencies] -lazy_static = { version = "1.4.0", optional = true } -cpufeatures = { version = "0.2.5", optional = true } -ring = "0.16.19" -sha2 = "0.10" - -[dev-dependencies] -rustc-hex = "2.1.0" - -[target.'cfg(target_arch = "wasm32")'.dev-dependencies] -wasm-bindgen-test = "0.3.18" - -[features] -default = ["zero_hash_cache", "detect-cpufeatures"] 
-zero_hash_cache = ["lazy_static"] -detect-cpufeatures = ["cpufeatures"] diff --git a/crypto/eth2_hashing/src/lib.rs b/crypto/eth2_hashing/src/lib.rs deleted file mode 100644 index 36a3d141391..00000000000 --- a/crypto/eth2_hashing/src/lib.rs +++ /dev/null @@ -1,251 +0,0 @@ -//! Optimized SHA256 for use in Ethereum 2.0. -//! -//! The initial purpose of this crate was to provide an abstraction over the hash function used in -//! Ethereum 2.0. The hash function changed during the specification process, so defining it once in -//! this crate made it easy to replace. -//! -//! Now this crate serves primarily as a wrapper over two SHA256 crates: `sha2` and `ring` – -//! which it switches between at runtime based on the availability of SHA intrinsics. - -pub use self::DynamicContext as Context; -use sha2::Digest; - -#[cfg(feature = "zero_hash_cache")] -use lazy_static::lazy_static; - -/// Length of a SHA256 hash in bytes. -pub const HASH_LEN: usize = 32; - -/// Returns the digest of `input` using the best available implementation. -pub fn hash(input: &[u8]) -> Vec { - DynamicImpl::best().hash(input) -} - -/// Hash function returning a fixed-size array (to save on allocations). -/// -/// Uses the best available implementation based on CPU features. -pub fn hash_fixed(input: &[u8]) -> [u8; HASH_LEN] { - DynamicImpl::best().hash_fixed(input) -} - -/// Compute the hash of two slices concatenated. -pub fn hash32_concat(h1: &[u8], h2: &[u8]) -> [u8; 32] { - let mut ctxt = DynamicContext::new(); - ctxt.update(h1); - ctxt.update(h2); - ctxt.finalize() -} - -/// Context trait for abstracting over implementation contexts. -pub trait Sha256Context { - fn new() -> Self; - - fn update(&mut self, bytes: &[u8]); - - fn finalize(self) -> [u8; HASH_LEN]; -} - -/// Top-level trait implemented by both `sha2` and `ring` implementations. 
-pub trait Sha256 { - type Context: Sha256Context; - - fn hash(&self, input: &[u8]) -> Vec; - - fn hash_fixed(&self, input: &[u8]) -> [u8; HASH_LEN]; -} - -/// Implementation of SHA256 using the `sha2` crate (fastest on CPUs with SHA extensions). -struct Sha2CrateImpl; - -impl Sha256Context for sha2::Sha256 { - fn new() -> Self { - sha2::Digest::new() - } - - fn update(&mut self, bytes: &[u8]) { - sha2::Digest::update(self, bytes) - } - - fn finalize(self) -> [u8; HASH_LEN] { - sha2::Digest::finalize(self).into() - } -} - -impl Sha256 for Sha2CrateImpl { - type Context = sha2::Sha256; - - fn hash(&self, input: &[u8]) -> Vec { - Self::Context::digest(input).into_iter().collect() - } - - fn hash_fixed(&self, input: &[u8]) -> [u8; HASH_LEN] { - Self::Context::digest(input).into() - } -} - -/// Implementation of SHA256 using the `ring` crate (fastest on CPUs without SHA extensions). -pub struct RingImpl; - -impl Sha256Context for ring::digest::Context { - fn new() -> Self { - Self::new(&ring::digest::SHA256) - } - - fn update(&mut self, bytes: &[u8]) { - self.update(bytes) - } - - fn finalize(self) -> [u8; HASH_LEN] { - let mut output = [0; HASH_LEN]; - output.copy_from_slice(self.finish().as_ref()); - output - } -} - -impl Sha256 for RingImpl { - type Context = ring::digest::Context; - - fn hash(&self, input: &[u8]) -> Vec { - ring::digest::digest(&ring::digest::SHA256, input) - .as_ref() - .into() - } - - fn hash_fixed(&self, input: &[u8]) -> [u8; HASH_LEN] { - let mut ctxt = Self::Context::new(&ring::digest::SHA256); - ctxt.update(input); - ctxt.finalize() - } -} - -/// Default dynamic implementation that switches between available implementations. -pub enum DynamicImpl { - Sha2, - Ring, -} - -// Runtime latch for detecting the availability of SHA extensions on x86_64. -// -// Inspired by the runtime switch within the `sha2` crate itself. 
-#[cfg(all(feature = "detect-cpufeatures", target_arch = "x86_64"))] -cpufeatures::new!(x86_sha_extensions, "sha", "sse2", "ssse3", "sse4.1"); - -#[inline(always)] -pub fn have_sha_extensions() -> bool { - #[cfg(all(feature = "detect-cpufeatures", target_arch = "x86_64"))] - return x86_sha_extensions::get(); - - #[cfg(not(all(feature = "detect-cpufeatures", target_arch = "x86_64")))] - return false; -} - -impl DynamicImpl { - /// Choose the best available implementation based on the currently executing CPU. - #[inline(always)] - pub fn best() -> Self { - if have_sha_extensions() { - Self::Sha2 - } else { - Self::Ring - } - } -} - -impl Sha256 for DynamicImpl { - type Context = DynamicContext; - - #[inline(always)] - fn hash(&self, input: &[u8]) -> Vec { - match self { - Self::Sha2 => Sha2CrateImpl.hash(input), - Self::Ring => RingImpl.hash(input), - } - } - - #[inline(always)] - fn hash_fixed(&self, input: &[u8]) -> [u8; HASH_LEN] { - match self { - Self::Sha2 => Sha2CrateImpl.hash_fixed(input), - Self::Ring => RingImpl.hash_fixed(input), - } - } -} - -/// Context encapsulating all implemenation contexts. -/// -/// This enum ends up being 8 bytes larger than the largest inner context. -pub enum DynamicContext { - Sha2(sha2::Sha256), - Ring(ring::digest::Context), -} - -impl Sha256Context for DynamicContext { - fn new() -> Self { - match DynamicImpl::best() { - DynamicImpl::Sha2 => Self::Sha2(Sha256Context::new()), - DynamicImpl::Ring => Self::Ring(Sha256Context::new()), - } - } - - fn update(&mut self, bytes: &[u8]) { - match self { - Self::Sha2(ctxt) => Sha256Context::update(ctxt, bytes), - Self::Ring(ctxt) => Sha256Context::update(ctxt, bytes), - } - } - - fn finalize(self) -> [u8; HASH_LEN] { - match self { - Self::Sha2(ctxt) => Sha256Context::finalize(ctxt), - Self::Ring(ctxt) => Sha256Context::finalize(ctxt), - } - } -} - -/// The max index that can be used with `ZERO_HASHES`. 
-#[cfg(feature = "zero_hash_cache")] -pub const ZERO_HASHES_MAX_INDEX: usize = 48; - -#[cfg(feature = "zero_hash_cache")] -lazy_static! { - /// Cached zero hashes where `ZERO_HASHES[i]` is the hash of a Merkle tree with 2^i zero leaves. - pub static ref ZERO_HASHES: Vec> = { - let mut hashes = vec![vec![0; 32]; ZERO_HASHES_MAX_INDEX + 1]; - - for i in 0..ZERO_HASHES_MAX_INDEX { - hashes[i + 1] = hash32_concat(&hashes[i], &hashes[i])[..].to_vec(); - } - - hashes - }; -} - -#[cfg(test)] -mod tests { - use super::*; - use rustc_hex::FromHex; - - #[cfg(target_arch = "wasm32")] - use wasm_bindgen_test::*; - - #[cfg_attr(not(target_arch = "wasm32"), test)] - #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)] - fn test_hashing() { - let input: Vec = b"hello world".as_ref().into(); - - let output = hash(input.as_ref()); - let expected_hex = "b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9"; - let expected: Vec = expected_hex.from_hex().unwrap(); - assert_eq!(expected, output); - } - - #[cfg(feature = "zero_hash_cache")] - mod zero_hash { - use super::*; - - #[test] - fn zero_hash_zero() { - assert_eq!(ZERO_HASHES[0], vec![0; 32]); - } - } -} diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 9ba9517c3af..ca832b978ee 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -33,7 +33,7 @@ slog = { version = "2.5.2", features = ["max_level_trace"] } sloggers = { version = "2.1.1", features = ["json"] } types = { "path" = "../consensus/types" } bls = { path = "../crypto/bls" } -eth2_hashing = "0.3.0" +ethereum_hashing = "1.0.0-beta.2" clap = "2.33.3" env_logger = "0.9.0" environment = { path = "./environment" } diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index b05e78fe5a7..e0587c3416b 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -6,8 +6,8 @@ use clap_utils::{flags::DISABLE_MALLOC_TUNING_FLAG, get_eth2_network_config}; use directory::{parse_path_or_default, DEFAULT_BEACON_NODE_DIR, 
DEFAULT_VALIDATOR_DIR}; use env_logger::{Builder, Env}; use environment::{EnvironmentBuilder, LoggerConfig}; -use eth2_hashing::have_sha_extensions; use eth2_network_config::{Eth2NetworkConfig, DEFAULT_HARDCODED_NETWORK, HARDCODED_NET_NAMES}; +use ethereum_hashing::have_sha_extensions; use lighthouse_version::VERSION; use malloc_utils::configure_memory_allocator; use slog::{crit, info, warn}; diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index c5ce8793ad4..7f2ac456b57 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -12,8 +12,8 @@ lmdb = ["lmdb-rkv", "lmdb-rkv-sys"] [dependencies] bincode = "1.3.1" byteorder = "1.3.4" -eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.1" +ethereum_ssz = "0.5.0" +ethereum_ssz_derive = "0.5.0" flate2 = { version = "1.0.14", features = ["zlib"], default-features = false } lazy_static = "1.4.0" lighthouse_metrics = { path = "../common/lighthouse_metrics" } @@ -26,8 +26,8 @@ serde = "1.0" serde_derive = "1.0" slog = "2.5.2" sloggers = { version = "2.1.1", features = ["json"] } -tree_hash = "0.4.1" -tree_hash_derive = "0.4.0" +tree_hash = "0.5.0" +tree_hash_derive = "0.5.0" types = { path = "../consensus/types" } strum = { version = "0.24.1", features = ["derive"] } diff --git a/testing/ef_tests/Cargo.toml b/testing/ef_tests/Cargo.toml index 79664a26228..11283052f07 100644 --- a/testing/ef_tests/Cargo.toml +++ b/testing/ef_tests/Cargo.toml @@ -22,10 +22,10 @@ serde = "1.0.116" serde_derive = "1.0.116" serde_repr = "0.1.6" serde_yaml = "0.8.13" -eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.1" -tree_hash = "0.4.1" -tree_hash_derive = "0.4.0" +ethereum_ssz = "0.5.0" +ethereum_ssz_derive = "0.5.0" +tree_hash = "0.5.0" +tree_hash_derive = "0.5.0" cached_tree_hash = { path = "../../consensus/cached_tree_hash" } state_processing = { path = "../../consensus/state_processing" } swap_or_not_shuffle = { path = "../../consensus/swap_or_not_shuffle" } diff --git a/testing/state_transition_vectors/Cargo.toml 
b/testing/state_transition_vectors/Cargo.toml index 6da9f2f4a6f..a25b3c31c1a 100644 --- a/testing/state_transition_vectors/Cargo.toml +++ b/testing/state_transition_vectors/Cargo.toml @@ -9,7 +9,7 @@ edition = "2021" [dependencies] state_processing = { path = "../../consensus/state_processing" } types = { path = "../../consensus/types" } -eth2_ssz = "0.4.1" +ethereum_ssz = "0.5.0" beacon_chain = { path = "../../beacon_node/beacon_chain" } lazy_static = "1.4.0" tokio = { version = "1.14.0", features = ["rt-multi-thread"] } diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index ada023f8c59..e0172afd2a1 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -13,7 +13,7 @@ tokio = { version = "1.14.0", features = ["time", "rt-multi-thread", "macros"] } logging = { path = "../common/logging" } [dependencies] -tree_hash = "0.4.1" +tree_hash = "0.5.0" clap = "2.33.3" slashing_protection = { path = "./slashing_protection" } slot_clock = { path = "../common/slot_clock" } @@ -46,7 +46,7 @@ lighthouse_version = { path = "../common/lighthouse_version" } warp_utils = { path = "../common/warp_utils" } warp = "0.3.2" hyper = "0.14.4" -eth2_serde_utils = "0.1.1" +ethereum_serde_utils = "0.5.0" libsecp256k1 = "0.7.0" ring = "0.16.19" rand = { version = "0.8.5", features = ["small_rng"] } diff --git a/validator_client/slashing_protection/Cargo.toml b/validator_client/slashing_protection/Cargo.toml index 631e54dc4eb..278dc22d0de 100644 --- a/validator_client/slashing_protection/Cargo.toml +++ b/validator_client/slashing_protection/Cargo.toml @@ -18,7 +18,7 @@ r2d2_sqlite = "0.21.0" serde = "1.0.116" serde_derive = "1.0.116" serde_json = "1.0.58" -eth2_serde_utils = "0.1.1" +ethereum_serde_utils = "0.5.0" filesystem = { path = "../../common/filesystem" } arbitrary = { version = "1.0", features = ["derive"], optional = true } diff --git a/validator_client/slashing_protection/src/interchange.rs 
b/validator_client/slashing_protection/src/interchange.rs index 3793766b6aa..99d37c38b9b 100644 --- a/validator_client/slashing_protection/src/interchange.rs +++ b/validator_client/slashing_protection/src/interchange.rs @@ -9,7 +9,7 @@ use types::{Epoch, Hash256, PublicKeyBytes, Slot}; #[serde(deny_unknown_fields)] #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct InterchangeMetadata { - #[serde(with = "eth2_serde_utils::quoted_u64::require_quotes")] + #[serde(with = "serde_utils::quoted_u64::require_quotes")] pub interchange_format_version: u64, pub genesis_validators_root: Hash256, } @@ -27,7 +27,7 @@ pub struct InterchangeData { #[serde(deny_unknown_fields)] #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct SignedBlock { - #[serde(with = "eth2_serde_utils::quoted_u64::require_quotes")] + #[serde(with = "serde_utils::quoted_u64::require_quotes")] pub slot: Slot, #[serde(skip_serializing_if = "Option::is_none")] pub signing_root: Option, @@ -37,9 +37,9 @@ pub struct SignedBlock { #[serde(deny_unknown_fields)] #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct SignedAttestation { - #[serde(with = "eth2_serde_utils::quoted_u64::require_quotes")] + #[serde(with = "serde_utils::quoted_u64::require_quotes")] pub source_epoch: Epoch, - #[serde(with = "eth2_serde_utils::quoted_u64::require_quotes")] + #[serde(with = "serde_utils::quoted_u64::require_quotes")] pub target_epoch: Epoch, #[serde(skip_serializing_if = "Option::is_none")] pub signing_root: Option, diff --git a/validator_client/src/http_api/api_secret.rs b/validator_client/src/http_api/api_secret.rs index b42cd11edd5..e688792ddc1 100644 --- a/validator_client/src/http_api/api_secret.rs +++ b/validator_client/src/http_api/api_secret.rs @@ -60,7 +60,7 @@ impl ApiSecret { // Create and write the secret key to file with appropriate permissions create_with_600_perms( &sk_path, - eth2_serde_utils::hex::encode(sk.serialize()).as_bytes(), + 
serde_utils::hex::encode(sk.serialize()).as_bytes(), ) .map_err(|e| { format!( @@ -75,7 +75,7 @@ impl ApiSecret { format!( "{}{}", PK_PREFIX, - eth2_serde_utils::hex::encode(&pk.serialize_compressed()[..]) + serde_utils::hex::encode(&pk.serialize_compressed()[..]) ) .as_bytes(), ) @@ -90,7 +90,7 @@ impl ApiSecret { let sk = fs::read(&sk_path) .map_err(|e| format!("cannot read {}: {}", SK_FILENAME, e)) .and_then(|bytes| { - eth2_serde_utils::hex::decode(&String::from_utf8_lossy(&bytes)) + serde_utils::hex::decode(&String::from_utf8_lossy(&bytes)) .map_err(|_| format!("{} should be 0x-prefixed hex", PK_FILENAME)) }) .and_then(|bytes| { @@ -114,7 +114,7 @@ impl ApiSecret { let hex = String::from_utf8(bytes).map_err(|_| format!("{} is not utf8", SK_FILENAME))?; if let Some(stripped) = hex.strip_prefix(PK_PREFIX) { - eth2_serde_utils::hex::decode(stripped) + serde_utils::hex::decode(stripped) .map_err(|_| format!("{} should be 0x-prefixed hex", SK_FILENAME)) } else { Err(format!("unable to parse {}", SK_FILENAME)) @@ -153,7 +153,7 @@ impl ApiSecret { /// Returns the public key of `self` as a 0x-prefixed hex string. fn pubkey_string(&self) -> String { - eth2_serde_utils::hex::encode(&self.pk.serialize_compressed()[..]) + serde_utils::hex::encode(&self.pk.serialize_compressed()[..]) } /// Returns the API token. 
@@ -205,7 +205,7 @@ impl ApiSecret { let message = Message::parse_slice(digest(&SHA256, input).as_ref()).expect("sha256 is 32 bytes"); let (signature, _) = libsecp256k1::sign(&message, &sk); - eth2_serde_utils::hex::encode(signature.serialize_der().as_ref()) + serde_utils::hex::encode(signature.serialize_der().as_ref()) } } } diff --git a/validator_client/src/http_api/create_validator.rs b/validator_client/src/http_api/create_validator.rs index a32ccce6279..f3107cfedbd 100644 --- a/validator_client/src/http_api/create_validator.rs +++ b/validator_client/src/http_api/create_validator.rs @@ -159,7 +159,7 @@ pub async fn create_validators_mnemonic<P: AsRef<Path>, T: 'static + SlotClock, gas_limit: request.gas_limit, builder_proposals: request.builder_proposals, voting_pubkey, - eth1_deposit_tx_data: eth2_serde_utils::hex::encode(&eth1_deposit_data.rlp), + eth1_deposit_tx_data: serde_utils::hex::encode(&eth1_deposit_data.rlp), deposit_gwei: request.deposit_gwei, }); } diff --git a/validator_client/src/http_api/tests.rs b/validator_client/src/http_api/tests.rs index df0e4804440..1c593b1a4ec 100644 --- a/validator_client/src/http_api/tests.rs +++ b/validator_client/src/http_api/tests.rs @@ -365,7 +365,7 @@ impl ApiTester { let withdrawal_keypair = keypairs.withdrawal.decrypt_keypair(PASSWORD_BYTES).unwrap(); let deposit_bytes = - eth2_serde_utils::hex::decode(&response[i].eth1_deposit_tx_data).unwrap(); + serde_utils::hex::decode(&response[i].eth1_deposit_tx_data).unwrap(); let (deposit_data, _) = decode_eth1_tx_data(&deposit_bytes, E::default_spec().max_effective_balance) diff --git a/validator_client/src/signing_method/web3signer.rs b/validator_client/src/signing_method/web3signer.rs index 54352394af2..72813071fc5 100644 --- a/validator_client/src/signing_method/web3signer.rs +++ b/validator_client/src/signing_method/web3signer.rs @@ -55,9 +55,9 @@ pub enum Web3SignerObject<'a, T: EthSpec, Payload: AbstractExecPayload<T>> { Deposit { pubkey: PublicKeyBytes, withdrawal_credentials: Hash256, -
#[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] amount: u64, - #[serde(with = "eth2_serde_utils::bytes_4_hex")] + #[serde(with = "serde_utils::bytes_4_hex")] genesis_fork_version: [u8; 4], }, RandaoReveal {