From 06e4d22d4954807ddc755feaea7518ad2ce06f35 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 14 Jan 2025 10:17:00 +1100 Subject: [PATCH] Electra spec changes for `v1.5.0-beta.0` (#6731) * First pass * Add restrictions to RuntimeVariableList api * Use empty_uninitialized and fix warnings * Fix some todos * Merge branch 'unstable' into max-blobs-preset * Fix take impl on RuntimeFixedList * cleanup * Fix test compilations * Fix some more tests * Fix test from unstable * Merge branch 'unstable' into max-blobs-preset * Implement "Bugfix and more withdrawal tests" * Implement "Add missed exit checks to consolidation processing" * Implement "Update initial earliest_exit_epoch calculation" * Implement "Limit consolidating balance by validator.effective_balance" * Implement "Use 16-bit random value in validator filter" * Implement "Do not change creds type on consolidation" * Rename PendingPartialWithdraw index field to validator_index * Skip slots to get test to pass and add TODO * Implement "Synchronously check all transactions to have non-zero length" * Merge remote-tracking branch 'origin/unstable' into max-blobs-preset * Remove footgun function * Minor simplifications * Move from preset to config * Fix typo * Revert "Remove footgun function" This reverts commit de01f923c7452355c87f50c0e8031ca94fa00d36. 
* Try fixing tests * Implement "bump minimal preset MAX_BLOB_COMMITMENTS_PER_BLOCK and KZG_COMMITMENT_INCLUSION_PROOF_DEPTH" * Thread through ChainSpec * Fix release tests * Move RuntimeFixedVector into module and rename * Add test * Implement "Remove post-altair `initialize_beacon_state_from_eth1` from specs" * Update preset YAML * Remove empty RuntimeVarList awefullness * Make max_blobs_per_block a config parameter (#6329) Squashed commit of the following: commit 04b3743ec1e0b650269dd8e58b540c02430d1c0d Author: Michael Sproul Date: Mon Jan 6 17:36:58 2025 +1100 Add test commit 440e85419940d4daba406d910e7908dd1fe78668 Author: Michael Sproul Date: Mon Jan 6 17:24:50 2025 +1100 Move RuntimeFixedVector into module and rename commit f66e179a40c3917eee39a93534ecf75480172699 Author: Michael Sproul Date: Mon Jan 6 17:17:17 2025 +1100 Fix release tests commit e4bfe71cd1f0a2784d0bd57f85b2f5d8cf503ac1 Author: Michael Sproul Date: Mon Jan 6 17:05:30 2025 +1100 Thread through ChainSpec commit 063b79c16abd3f6df47b85efcf3858177bc933b9 Author: Michael Sproul Date: Mon Jan 6 15:32:16 2025 +1100 Try fixing tests commit 88bedf09bc647de66bd1ff944bbc8fb13e2b7590 Author: Michael Sproul Date: Mon Jan 6 15:04:37 2025 +1100 Revert "Remove footgun function" This reverts commit de01f923c7452355c87f50c0e8031ca94fa00d36. 
commit 32483d385b66f252d50cee5b524e2924157bdcd4 Author: Michael Sproul Date: Mon Jan 6 15:04:32 2025 +1100 Fix typo commit 2e86585b478c012f6e3483989c87e38161227674 Author: Michael Sproul Date: Mon Jan 6 15:04:15 2025 +1100 Move from preset to config commit 1095d60a40be20dd3c229b759fc3c228b51e51e3 Author: Michael Sproul Date: Mon Jan 6 14:38:40 2025 +1100 Minor simplifications commit de01f923c7452355c87f50c0e8031ca94fa00d36 Author: Michael Sproul Date: Mon Jan 6 14:06:57 2025 +1100 Remove footgun function commit 0c2c8c42245c25b8cf17885faf20acd3b81140ec Merge: 21ecb58ff f51a292f7 Author: Michael Sproul Date: Mon Jan 6 14:02:50 2025 +1100 Merge remote-tracking branch 'origin/unstable' into max-blobs-preset commit f51a292f77575a1786af34271fb44954f141c377 Author: Daniel Knopik <107140945+dknopik@users.noreply.github.com> Date: Fri Jan 3 20:27:21 2025 +0100 fully lint only explicitly to avoid unnecessary rebuilds (#6753) * fully lint only explicitly to avoid unnecessary rebuilds commit 7e0cddef321c2a069582c65b58e5f46590d60c49 Author: Akihito Nakano Date: Tue Dec 24 10:38:56 2024 +0900 Make sure we have fanout peers when publish (#6738) * Ensure that `fanout_peers` is always non-empty if it's `Some` commit 21ecb58ff88b86435ab62d9ac227394c10fdcd22 Merge: 2fcb2935e 9aefb5539 Author: Pawan Dhananjay Date: Mon Oct 21 14:46:00 2024 -0700 Merge branch 'unstable' into max-blobs-preset commit 2fcb2935ec7ef4cd18bbdd8aedb7de61fac69e61 Author: Pawan Dhananjay Date: Fri Sep 6 18:28:31 2024 -0700 Fix test from unstable commit 12c6ef118a1a6d910c48d9d4b23004f3609264c7 Author: Pawan Dhananjay Date: Wed Sep 4 16:16:36 2024 -0700 Fix some more tests commit d37733b846ce58e318e976d6503ca394b4901141 Author: Pawan Dhananjay Date: Wed Sep 4 12:47:36 2024 -0700 Fix test compilations commit 52bb581e071d5f474d519366e860a4b3a0b52f78 Author: Pawan Dhananjay Date: Tue Sep 3 18:38:19 2024 -0700 cleanup commit e71020e3e613910e0315f558ead661b490a0ff20 Author: Pawan Dhananjay Date: Tue Sep 3 17:16:10 
2024 -0700 Fix take impl on RuntimeFixedList commit 13f9bba6470b2140e5c34f14aed06dab2b062c1c Merge: 60100fc6b 4e675cf5d Author: Pawan Dhananjay Date: Tue Sep 3 16:08:59 2024 -0700 Merge branch 'unstable' into max-blobs-preset commit 60100fc6be72792ff33913d7e5a53434c792aacf Author: Pawan Dhananjay Date: Fri Aug 30 16:04:11 2024 -0700 Fix some todos commit a9cb329a221a809f7dd818984753826f91c2e26b Author: Pawan Dhananjay Date: Fri Aug 30 15:54:00 2024 -0700 Use empty_uninitialized and fix warnings commit 4dc6e6515ecf75cefa4de840edc7b57e76a8fc9e Author: Pawan Dhananjay Date: Fri Aug 30 15:53:18 2024 -0700 Add restrictions to RuntimeVariableList api commit 25feedfde348b530c4fa2348cc71a06b746898ed Author: Pawan Dhananjay Date: Thu Aug 29 16:11:19 2024 -0700 First pass * Fix tests * Implement max_blobs_per_block_electra * Fix config issues * Simplify BlobSidecarListFromRoot * Disable PeerDAS tests * Merge remote-tracking branch 'origin/unstable' into max-blobs-preset * Bump quota to account for new target (6) * Remove clone * Fix issue from review * Try to remove ugliness * Merge branch 'unstable' into max-blobs-preset * Merge remote-tracking branch 'origin/unstable' into electra-alpha10 * Merge commit '04b3743ec1e0b650269dd8e58b540c02430d1c0d' into electra-alpha10 * Merge remote-tracking branch 'pawan/max-blobs-preset' into electra-alpha10 * Update tests to v1.5.0-beta.0 * Resolve merge conflicts * Linting * fmt * Fix test and add TODO * Gracefully handle slashed proposers in fork choice tests * Merge remote-tracking branch 'origin/unstable' into electra-alpha10 * Keep latest changes from max_blobs_per_block PR in codec.rs * Revert a few more regressions and add a comment * Disable more DAS tests * Improve validator monitor test a little * Make test more robust * Fix sync test that didn't understand blobs * Fill out cropped comment --- .../beacon_chain/tests/validator_monitor.rs | 82 ++++++++----------- .../src/engine_api/new_payload_request.rs | 5 ++ 
beacon_node/execution_layer/src/lib.rs | 1 + .../lighthouse_network/src/rpc/protocol.rs | 2 +- beacon_node/network/src/sync/tests/range.rs | 53 ++++++++---- consensus/fork_choice/tests/tests.rs | 61 +++++++------- .../src/per_block_processing.rs | 36 +++++--- .../process_operations.rs | 33 +++++--- .../src/per_epoch_processing/single_pass.rs | 4 +- .../state_processing/src/upgrade/electra.rs | 4 +- consensus/types/presets/gnosis/electra.yaml | 13 ++- consensus/types/presets/mainnet/altair.yaml | 2 + consensus/types/presets/mainnet/electra.yaml | 13 ++- consensus/types/presets/mainnet/phase0.yaml | 2 +- consensus/types/presets/minimal/altair.yaml | 2 + consensus/types/presets/minimal/deneb.yaml | 8 +- consensus/types/presets/minimal/electra.yaml | 15 ++-- consensus/types/presets/minimal/phase0.yaml | 6 +- consensus/types/src/beacon_state.rs | 53 ++++++++++-- consensus/types/src/chain_spec.rs | 24 +++++- consensus/types/src/eth_spec.rs | 14 ++-- .../types/src/pending_partial_withdrawal.rs | 2 +- consensus/types/src/preset.rs | 4 +- testing/ef_tests/Makefile | 2 +- testing/ef_tests/check_all_files_accessed.py | 6 ++ .../src/cases/genesis_initialization.rs | 3 +- .../ef_tests/src/cases/genesis_validity.rs | 4 + testing/ef_tests/src/handler.rs | 23 ++++-- testing/ef_tests/tests/tests.rs | 10 ++- 29 files changed, 309 insertions(+), 178 deletions(-) diff --git a/beacon_node/beacon_chain/tests/validator_monitor.rs b/beacon_node/beacon_chain/tests/validator_monitor.rs index 91de4fe2702..180db6d76dd 100644 --- a/beacon_node/beacon_chain/tests/validator_monitor.rs +++ b/beacon_node/beacon_chain/tests/validator_monitor.rs @@ -4,7 +4,7 @@ use beacon_chain::test_utils::{ use beacon_chain::validator_monitor::{ValidatorMonitorConfig, MISSED_BLOCK_LAG_SLOTS}; use logging::test_logger; use std::sync::LazyLock; -use types::{Epoch, EthSpec, ForkName, Keypair, MainnetEthSpec, PublicKeyBytes, Slot}; +use types::{Epoch, EthSpec, Keypair, MainnetEthSpec, PublicKeyBytes, Slot}; // Should 
ideally be divisible by 3. pub const VALIDATOR_COUNT: usize = 48; @@ -117,7 +117,7 @@ async fn missed_blocks_across_epochs() { } #[tokio::test] -async fn produces_missed_blocks() { +async fn missed_blocks_basic() { let validator_count = 16; let slots_per_epoch = E::slots_per_epoch(); @@ -127,13 +127,10 @@ async fn produces_missed_blocks() { // Generate 63 slots (2 epochs * 32 slots per epoch - 1) let initial_blocks = slots_per_epoch * nb_epoch_to_simulate.as_u64() - 1; - // The validator index of the validator that is 'supposed' to miss a block - let validator_index_to_monitor = 1; - // 1st scenario // // // Missed block happens when slot and prev_slot are in the same epoch - let harness1 = get_harness(validator_count, vec![validator_index_to_monitor]); + let harness1 = get_harness(validator_count, vec![]); harness1 .extend_chain( initial_blocks as usize, @@ -153,7 +150,7 @@ async fn produces_missed_blocks() { let mut prev_slot = Slot::new(idx - 1); let mut duplicate_block_root = *_state.block_roots().get(idx as usize).unwrap(); let mut validator_indexes = _state.get_beacon_proposer_indices(&harness1.spec).unwrap(); - let mut validator_index = validator_indexes[slot_in_epoch.as_usize()]; + let mut missed_block_proposer = validator_indexes[slot_in_epoch.as_usize()]; let mut proposer_shuffling_decision_root = _state .proposer_shuffling_decision_root(duplicate_block_root) .unwrap(); @@ -170,7 +167,7 @@ async fn produces_missed_blocks() { beacon_proposer_cache.lock().insert( epoch, proposer_shuffling_decision_root, - validator_indexes.into_iter().collect::>(), + validator_indexes, _state.fork() ), Ok(()) @@ -187,12 +184,15 @@ async fn produces_missed_blocks() { // Let's validate the state which will call the function responsible for // adding the missed blocks to the validator monitor let mut validator_monitor = harness1.chain.validator_monitor.write(); + + validator_monitor.add_validator_pubkey(KEYPAIRS[missed_block_proposer].pk.compress()); 
validator_monitor.process_valid_state(nb_epoch_to_simulate, _state, &harness1.chain.spec); // We should have one entry in the missed blocks map assert_eq!( - validator_monitor.get_monitored_validator_missed_block_count(validator_index as u64), - 1 + validator_monitor + .get_monitored_validator_missed_block_count(missed_block_proposer as u64), + 1, ); } @@ -201,23 +201,7 @@ async fn produces_missed_blocks() { // Missed block happens when slot and prev_slot are not in the same epoch // making sure that the cache reloads when the epoch changes // in that scenario the slot that missed a block is the first slot of the epoch - // We are adding other validators to monitor as these ones will miss a block depending on - // the fork name specified when running the test as the proposer cache differs depending on - // the fork name (cf. seed) - // - // If you are adding a new fork and seeing errors, print - // `validator_indexes[slot_in_epoch.as_usize()]` and add it below. - let validator_index_to_monitor = match harness1.spec.fork_name_at_slot::(Slot::new(0)) { - ForkName::Base => 7, - ForkName::Altair => 2, - ForkName::Bellatrix => 4, - ForkName::Capella => 11, - ForkName::Deneb => 3, - ForkName::Electra => 1, - ForkName::Fulu => 6, - }; - - let harness2 = get_harness(validator_count, vec![validator_index_to_monitor]); + let harness2 = get_harness(validator_count, vec![]); let advance_slot_by = 9; harness2 .extend_chain( @@ -238,11 +222,7 @@ async fn produces_missed_blocks() { slot_in_epoch = slot % slots_per_epoch; duplicate_block_root = *_state2.block_roots().get(idx as usize).unwrap(); validator_indexes = _state2.get_beacon_proposer_indices(&harness2.spec).unwrap(); - validator_index = validator_indexes[slot_in_epoch.as_usize()]; - // If you are adding a new fork and seeing errors, it means the fork seed has changed the - // validator_index. Uncomment this line, run the test again and add the resulting index to the - // list above. 
- //eprintln!("new index which needs to be added => {:?}", validator_index); + missed_block_proposer = validator_indexes[slot_in_epoch.as_usize()]; let beacon_proposer_cache = harness2 .chain @@ -256,7 +236,7 @@ async fn produces_missed_blocks() { beacon_proposer_cache.lock().insert( epoch, duplicate_block_root, - validator_indexes.into_iter().collect::>(), + validator_indexes.clone(), _state2.fork() ), Ok(()) @@ -271,10 +251,12 @@ async fn produces_missed_blocks() { // Let's validate the state which will call the function responsible for // adding the missed blocks to the validator monitor let mut validator_monitor2 = harness2.chain.validator_monitor.write(); + validator_monitor2.add_validator_pubkey(KEYPAIRS[missed_block_proposer].pk.compress()); validator_monitor2.process_valid_state(epoch, _state2, &harness2.chain.spec); // We should have one entry in the missed blocks map assert_eq!( - validator_monitor2.get_monitored_validator_missed_block_count(validator_index as u64), + validator_monitor2 + .get_monitored_validator_missed_block_count(missed_block_proposer as u64), 1 ); @@ -282,19 +264,20 @@ async fn produces_missed_blocks() { // // A missed block happens but the validator is not monitored // it should not be flagged as a missed block - idx = initial_blocks + (advance_slot_by) - 7; + while validator_indexes[(idx % slots_per_epoch) as usize] == missed_block_proposer + && idx / slots_per_epoch == epoch.as_u64() + { + idx += 1; + } slot = Slot::new(idx); prev_slot = Slot::new(idx - 1); slot_in_epoch = slot % slots_per_epoch; duplicate_block_root = *_state2.block_roots().get(idx as usize).unwrap(); - validator_indexes = _state2.get_beacon_proposer_indices(&harness2.spec).unwrap(); - let not_monitored_validator_index = validator_indexes[slot_in_epoch.as_usize()]; - // This could do with a refactor: https://github.com/sigp/lighthouse/issues/6293 - assert_ne!( - not_monitored_validator_index, - validator_index_to_monitor, - "this test has a fragile dependency on 
hardcoded indices. you need to tweak some settings or rewrite this" - ); + let second_missed_block_proposer = validator_indexes[slot_in_epoch.as_usize()]; + + // This test may fail if we can't find another distinct proposer in the same epoch. + // However, this should be vanishingly unlikely: P ~= (1/16)^32 = 2e-39. + assert_ne!(missed_block_proposer, second_missed_block_proposer); assert_eq!( _state2.set_block_root(prev_slot, duplicate_block_root), @@ -306,10 +289,9 @@ async fn produces_missed_blocks() { validator_monitor2.process_valid_state(epoch, _state2, &harness2.chain.spec); // We shouldn't have any entry in the missed blocks map - assert_ne!(validator_index, not_monitored_validator_index); assert_eq!( validator_monitor2 - .get_monitored_validator_missed_block_count(not_monitored_validator_index as u64), + .get_monitored_validator_missed_block_count(second_missed_block_proposer as u64), 0 ); } @@ -318,7 +300,7 @@ async fn produces_missed_blocks() { // // A missed block happens at state.slot - LOG_SLOTS_PER_EPOCH // it shouldn't be flagged as a missed block - let harness3 = get_harness(validator_count, vec![validator_index_to_monitor]); + let harness3 = get_harness(validator_count, vec![]); harness3 .extend_chain( slots_per_epoch as usize, @@ -338,7 +320,7 @@ async fn produces_missed_blocks() { prev_slot = Slot::new(idx - 1); duplicate_block_root = *_state3.block_roots().get(idx as usize).unwrap(); validator_indexes = _state3.get_beacon_proposer_indices(&harness3.spec).unwrap(); - validator_index = validator_indexes[slot_in_epoch.as_usize()]; + missed_block_proposer = validator_indexes[slot_in_epoch.as_usize()]; proposer_shuffling_decision_root = _state3 .proposer_shuffling_decision_root_at_epoch(epoch, duplicate_block_root) .unwrap(); @@ -355,7 +337,7 @@ async fn produces_missed_blocks() { beacon_proposer_cache.lock().insert( epoch, proposer_shuffling_decision_root, - validator_indexes.into_iter().collect::>(), + validator_indexes, _state3.fork() ), Ok(()) 
@@ -372,11 +354,13 @@ async fn produces_missed_blocks() { // Let's validate the state which will call the function responsible for // adding the missed blocks to the validator monitor let mut validator_monitor3 = harness3.chain.validator_monitor.write(); + validator_monitor3.add_validator_pubkey(KEYPAIRS[missed_block_proposer].pk.compress()); validator_monitor3.process_valid_state(epoch, _state3, &harness3.chain.spec); // We shouldn't have one entry in the missed blocks map assert_eq!( - validator_monitor3.get_monitored_validator_missed_block_count(validator_index as u64), + validator_monitor3 + .get_monitored_validator_missed_block_count(missed_block_proposer as u64), 0 ); } diff --git a/beacon_node/execution_layer/src/engine_api/new_payload_request.rs b/beacon_node/execution_layer/src/engine_api/new_payload_request.rs index a86b2fd9bbf..23610c9ae45 100644 --- a/beacon_node/execution_layer/src/engine_api/new_payload_request.rs +++ b/beacon_node/execution_layer/src/engine_api/new_payload_request.rs @@ -128,6 +128,11 @@ impl<'block, E: EthSpec> NewPayloadRequest<'block, E> { let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_VERIFY_BLOCK_HASH); + // Check that no transactions in the payload are zero length + if payload.transactions().iter().any(|slice| slice.is_empty()) { + return Err(Error::ZeroLengthTransaction); + } + let (header_hash, rlp_transactions_root) = calculate_execution_block_hash( payload, parent_beacon_block_root, diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 118d7adfcaa..f7abe73543b 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -157,6 +157,7 @@ pub enum Error { payload: ExecutionBlockHash, transactions_root: Hash256, }, + ZeroLengthTransaction, PayloadBodiesByRangeNotSupported, InvalidJWTSecret(String), InvalidForkForPayload, diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs 
b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 681b739d598..780dff937d3 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -710,7 +710,7 @@ pub fn rpc_blob_limits() -> RpcLimits { } } -// TODO(peerdas): fix hardcoded max here +// TODO(das): fix hardcoded max here pub fn rpc_data_column_limits(fork_name: ForkName) -> RpcLimits { RpcLimits::new( DataColumnSidecar::::empty().as_ssz_bytes().len(), diff --git a/beacon_node/network/src/sync/tests/range.rs b/beacon_node/network/src/sync/tests/range.rs index 6faa8b72472..05d5e4a4143 100644 --- a/beacon_node/network/src/sync/tests/range.rs +++ b/beacon_node/network/src/sync/tests/range.rs @@ -4,12 +4,15 @@ use crate::sync::manager::SLOT_IMPORT_TOLERANCE; use crate::sync::range_sync::RangeSyncType; use crate::sync::SyncMessage; use beacon_chain::test_utils::{AttestationStrategy, BlockStrategy}; -use beacon_chain::EngineState; +use beacon_chain::{block_verification_types::RpcBlock, EngineState, NotifyExecutionLayer}; use lighthouse_network::rpc::{RequestType, StatusMessage}; use lighthouse_network::service::api_types::{AppRequestId, Id, SyncRequestId}; use lighthouse_network::{PeerId, SyncInfo}; use std::time::Duration; -use types::{EthSpec, Hash256, MinimalEthSpec as E, SignedBeaconBlock, Slot}; +use types::{ + BlobSidecarList, BlockImportSource, EthSpec, Hash256, MinimalEthSpec as E, SignedBeaconBlock, + SignedBeaconBlockHash, Slot, +}; const D: Duration = Duration::new(0, 0); @@ -154,7 +157,9 @@ impl TestRig { } } - async fn create_canonical_block(&mut self) -> SignedBeaconBlock { + async fn create_canonical_block( + &mut self, + ) -> (SignedBeaconBlock, Option>) { self.harness.advance_slot(); let block_root = self @@ -165,19 +170,39 @@ impl TestRig { AttestationStrategy::AllValidators, ) .await; - self.harness - .chain - .store - .get_full_block(&block_root) - .unwrap() - .unwrap() + // TODO(das): this does not handle data columns yet + 
let store = &self.harness.chain.store; + let block = store.get_full_block(&block_root).unwrap().unwrap(); + let blobs = if block.fork_name_unchecked().deneb_enabled() { + store.get_blobs(&block_root).unwrap().blobs() + } else { + None + }; + (block, blobs) } - async fn remember_block(&mut self, block: SignedBeaconBlock) { - self.harness - .process_block(block.slot(), block.canonical_root(), (block.into(), None)) + async fn remember_block( + &mut self, + (block, blob_sidecars): (SignedBeaconBlock, Option>), + ) { + // This code is kind of duplicated from Harness::process_block, but takes sidecars directly. + let block_root = block.canonical_root(); + self.harness.set_current_slot(block.slot()); + let _: SignedBeaconBlockHash = self + .harness + .chain + .process_block( + block_root, + RpcBlock::new(Some(block_root), block.into(), blob_sidecars).unwrap(), + NotifyExecutionLayer::Yes, + BlockImportSource::RangeSync, + || Ok(()), + ) .await + .unwrap() + .try_into() .unwrap(); + self.harness.chain.recompute_head_at_current_slot().await; } } @@ -217,9 +242,9 @@ async fn state_update_while_purging() { // Need to create blocks that can be inserted into the fork-choice and fit the "known // conditions" below. 
let head_peer_block = rig_2.create_canonical_block().await; - let head_peer_root = head_peer_block.canonical_root(); + let head_peer_root = head_peer_block.0.canonical_root(); let finalized_peer_block = rig_2.create_canonical_block().await; - let finalized_peer_root = finalized_peer_block.canonical_root(); + let finalized_peer_root = finalized_peer_block.0.canonical_root(); // Get a peer with an advanced head let head_peer = rig.add_head_peer_with_root(head_peer_root); diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index 001b80fe113..70b4b73d528 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -10,6 +10,7 @@ use beacon_chain::{ use fork_choice::{ ForkChoiceStore, InvalidAttestation, InvalidBlock, PayloadVerificationStatus, QueuedAttestation, }; +use state_processing::state_advance::complete_state_advance; use std::fmt; use std::sync::Mutex; use std::time::Duration; @@ -172,6 +173,20 @@ impl ForkChoiceTest { let validators = self.harness.get_all_validators(); loop { let slot = self.harness.get_current_slot(); + + // Skip slashed proposers, as we expect validators to get slashed in these tests. + // Presently `make_block` will panic if the proposer is slashed, so we just avoid + // calling it in this case. + complete_state_advance(&mut state, None, slot, &self.harness.spec).unwrap(); + state.build_caches(&self.harness.spec).unwrap(); + let proposer_index = state + .get_beacon_proposer_index(slot, &self.harness.chain.spec) + .unwrap(); + if state.validators().get(proposer_index).unwrap().slashed { + self.harness.advance_slot(); + continue; + } + let (block_contents, state_) = self.harness.make_block(state, slot).await; state = state_; if !predicate(block_contents.0.message(), &state) { @@ -196,17 +211,20 @@ impl ForkChoiceTest { } /// Apply `count` blocks to the chain (with attestations). 
+ /// + /// Note that in the case of slashed validators, their proposals will be skipped and the chain + /// may be advanced by *more than* `count` slots. pub async fn apply_blocks(self, count: usize) -> Self { - self.harness.advance_slot(); - self.harness - .extend_chain( - count, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ) - .await; - - self + // Use `Self::apply_blocks_while` which gracefully handles slashed validators. + let mut blocks_applied = 0; + self.apply_blocks_while(|_, _| { + // Blocks are applied after the predicate is called, so continue applying the block if + // less than *or equal* to the count. + blocks_applied += 1; + blocks_applied <= count + }) + .await + .unwrap() } /// Slash a validator from the previous epoch committee. @@ -244,6 +262,7 @@ impl ForkChoiceTest { /// Apply `count` blocks to the chain (without attestations). pub async fn apply_blocks_without_new_attestations(self, count: usize) -> Self { + // This function does not gracefully handle slashed proposers, but may need to in future. self.harness.advance_slot(); self.harness .extend_chain( @@ -1226,14 +1245,6 @@ async fn progressive_balances_cache_attester_slashing() { .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) .await .unwrap() - // Note: This test may fail if the shuffling used changes, right now it re-runs with - // deterministic shuffling. A shuffling change my cause the slashed proposer to propose - // again in the next epoch, which results in a block processing failure - // (`HeaderInvalid::ProposerSlashed`). The harness should be re-worked to successfully skip - // the slot in this scenario rather than panic-ing. The same applies to - // `progressive_balances_cache_proposer_slashing`. 
- .apply_blocks(2) - .await .add_previous_epoch_attester_slashing() .await // expect fork choice to import blocks successfully after a previous epoch attester is @@ -1244,7 +1255,7 @@ async fn progressive_balances_cache_attester_slashing() { // expect fork choice to import another epoch of blocks successfully - the slashed // attester's balance should be excluded from the current epoch total balance in // `ProgressiveBalancesCache` as well. - .apply_blocks(MainnetEthSpec::slots_per_epoch() as usize) + .apply_blocks(E::slots_per_epoch() as usize) .await; } @@ -1257,15 +1268,7 @@ async fn progressive_balances_cache_proposer_slashing() { .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) .await .unwrap() - // Note: This test may fail if the shuffling used changes, right now it re-runs with - // deterministic shuffling. A shuffling change may cause the slashed proposer to propose - // again in the next epoch, which results in a block processing failure - // (`HeaderInvalid::ProposerSlashed`). The harness should be re-worked to successfully skip - // the slot in this scenario rather than panic-ing. The same applies to - // `progressive_balances_cache_attester_slashing`. - .apply_blocks(1) - .await - .add_previous_epoch_proposer_slashing(MainnetEthSpec::slots_per_epoch()) + .add_previous_epoch_proposer_slashing(E::slots_per_epoch()) .await // expect fork choice to import blocks successfully after a previous epoch proposer is // slashed, i.e. the slashed proposer's balance is correctly excluded from @@ -1275,6 +1278,6 @@ async fn progressive_balances_cache_proposer_slashing() { // expect fork choice to import another epoch of blocks successfully - the slashed // proposer's balance should be excluded from the current epoch total balance in // `ProgressiveBalancesCache` as well. 
- .apply_blocks(MainnetEthSpec::slots_per_epoch() as usize) + .apply_blocks(E::slots_per_epoch() as usize) .await; } diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index 782dbe2a547..502ad25838e 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -1,7 +1,7 @@ use crate::consensus_context::ConsensusContext; use errors::{BlockOperationError, BlockProcessingError, HeaderInvalid}; use rayon::prelude::*; -use safe_arith::{ArithError, SafeArith}; +use safe_arith::{ArithError, SafeArith, SafeArithIter}; use signature_sets::{block_proposal_signature_set, get_pubkey_from_state, randao_signature_set}; use std::borrow::Cow; use tree_hash::TreeHash; @@ -509,7 +509,7 @@ pub fn compute_timestamp_at_slot( /// Compute the next batch of withdrawals which should be included in a block. /// -/// https://github.com/ethereum/consensus-specs/blob/dev/specs/capella/beacon-chain.md#new-get_expected_withdrawals +/// https://github.com/ethereum/consensus-specs/blob/dev/specs/electra/beacon-chain.md#new-get_expected_withdrawals pub fn get_expected_withdrawals( state: &BeaconState, spec: &ChainSpec, @@ -522,9 +522,9 @@ pub fn get_expected_withdrawals( // [New in Electra:EIP7251] // Consume pending partial withdrawals - let partial_withdrawals_count = + let processed_partial_withdrawals_count = if let Ok(partial_withdrawals) = state.pending_partial_withdrawals() { - let mut partial_withdrawals_count = 0; + let mut processed_partial_withdrawals_count = 0; for withdrawal in partial_withdrawals { if withdrawal.withdrawable_epoch > epoch || withdrawals.len() == spec.max_pending_partials_per_withdrawals_sweep as usize @@ -532,8 +532,8 @@ pub fn get_expected_withdrawals( break; } - let withdrawal_balance = state.get_balance(withdrawal.index as usize)?; - let validator = state.get_validator(withdrawal.index as usize)?; + let withdrawal_balance = 
state.get_balance(withdrawal.validator_index as usize)?; + let validator = state.get_validator(withdrawal.validator_index as usize)?; let has_sufficient_effective_balance = validator.effective_balance >= spec.min_activation_balance; @@ -549,7 +549,7 @@ pub fn get_expected_withdrawals( ); withdrawals.push(Withdrawal { index: withdrawal_index, - validator_index: withdrawal.index, + validator_index: withdrawal.validator_index, address: validator .get_execution_withdrawal_address(spec) .ok_or(BeaconStateError::NonExecutionAddresWithdrawalCredential)?, @@ -557,9 +557,9 @@ pub fn get_expected_withdrawals( }); withdrawal_index.safe_add_assign(1)?; } - partial_withdrawals_count.safe_add_assign(1)?; + processed_partial_withdrawals_count.safe_add_assign(1)?; } - Some(partial_withdrawals_count) + Some(processed_partial_withdrawals_count) } else { None }; @@ -570,9 +570,19 @@ pub fn get_expected_withdrawals( ); for _ in 0..bound { let validator = state.get_validator(validator_index as usize)?; - let balance = *state.balances().get(validator_index as usize).ok_or( - BeaconStateError::BalancesOutOfBounds(validator_index as usize), - )?; + let partially_withdrawn_balance = withdrawals + .iter() + .filter_map(|withdrawal| { + (withdrawal.validator_index == validator_index).then_some(withdrawal.amount) + }) + .safe_sum()?; + let balance = state + .balances() + .get(validator_index as usize) + .ok_or(BeaconStateError::BalancesOutOfBounds( + validator_index as usize, + ))? + .safe_sub(partially_withdrawn_balance)?; if validator.is_fully_withdrawable_at(balance, epoch, spec, fork_name) { withdrawals.push(Withdrawal { index: withdrawal_index, @@ -604,7 +614,7 @@ pub fn get_expected_withdrawals( .safe_rem(state.validators().len() as u64)?; } - Ok((withdrawals.into(), partial_withdrawals_count)) + Ok((withdrawals.into(), processed_partial_withdrawals_count)) } /// Apply withdrawals to the state. 
diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 4977f7c7e9d..82dd6167241 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -507,11 +507,11 @@ pub fn process_withdrawal_requests( } // Verify pubkey exists - let Some(index) = state.pubkey_cache().get(&request.validator_pubkey) else { + let Some(validator_index) = state.pubkey_cache().get(&request.validator_pubkey) else { continue; }; - let validator = state.get_validator(index)?; + let validator = state.get_validator(validator_index)?; // Verify withdrawal credentials let has_correct_credential = validator.has_execution_withdrawal_credential(spec); let is_correct_source_address = validator @@ -542,16 +542,16 @@ pub fn process_withdrawal_requests( continue; } - let pending_balance_to_withdraw = state.get_pending_balance_to_withdraw(index)?; + let pending_balance_to_withdraw = state.get_pending_balance_to_withdraw(validator_index)?; if is_full_exit_request { // Only exit validator if it has no pending withdrawals in the queue if pending_balance_to_withdraw == 0 { - initiate_validator_exit(state, index, spec)? + initiate_validator_exit(state, validator_index, spec)? } continue; } - let balance = state.get_balance(index)?; + let balance = state.get_balance(validator_index)?; let has_sufficient_effective_balance = validator.effective_balance >= spec.min_activation_balance; let has_excess_balance = balance @@ -576,7 +576,7 @@ pub fn process_withdrawal_requests( state .pending_partial_withdrawals_mut()? 
.push(PendingPartialWithdrawal { - index: index as u64, + validator_index: validator_index as u64, amount: to_withdraw, withdrawable_epoch, })?; @@ -739,8 +739,8 @@ pub fn process_consolidation_request( } let target_validator = state.get_validator(target_index)?; - // Verify the target has execution withdrawal credentials - if !target_validator.has_execution_withdrawal_credential(spec) { + // Verify the target has compounding withdrawal credentials + if !target_validator.has_compounding_withdrawal_credential(spec) { return Ok(()); } @@ -757,6 +757,18 @@ pub fn process_consolidation_request( { return Ok(()); } + // Verify the source has been active long enough + if current_epoch + < source_validator + .activation_epoch + .safe_add(spec.shard_committee_period)? + { + return Ok(()); + } + // Verify the source has no pending withdrawals in the queue + if state.get_pending_balance_to_withdraw(source_index)? > 0 { + return Ok(()); + } // Initiate source validator exit and append pending consolidation let source_exit_epoch = state @@ -772,10 +784,5 @@ pub fn process_consolidation_request( target_index: target_index as u64, })?; - let target_validator = state.get_validator(target_index)?; - // Churn any target excess active balance of target and raise its max - if target_validator.has_eth1_withdrawal_credential(spec) { - state.switch_to_compounding_validator(target_index, spec)?; - } Ok(()) } diff --git a/consensus/state_processing/src/per_epoch_processing/single_pass.rs b/consensus/state_processing/src/per_epoch_processing/single_pass.rs index 904e68e3686..a4a81c8eeff 100644 --- a/consensus/state_processing/src/per_epoch_processing/single_pass.rs +++ b/consensus/state_processing/src/per_epoch_processing/single_pass.rs @@ -1057,14 +1057,12 @@ fn process_pending_consolidations( } // Calculate the consolidated balance - let max_effective_balance = - source_validator.get_max_effective_balance(spec, state_ctxt.fork_name); let source_effective_balance = std::cmp::min( *state 
.balances() .get(source_index) .ok_or(BeaconStateError::UnknownValidator(source_index))?, - max_effective_balance, + source_validator.effective_balance, ); // Move active balance to target. Excess balance is withdrawable. diff --git a/consensus/state_processing/src/upgrade/electra.rs b/consensus/state_processing/src/upgrade/electra.rs index 1e64ef28978..0f32e1553d9 100644 --- a/consensus/state_processing/src/upgrade/electra.rs +++ b/consensus/state_processing/src/upgrade/electra.rs @@ -14,13 +14,15 @@ pub fn upgrade_to_electra( ) -> Result<(), Error> { let epoch = pre_state.current_epoch(); + let activation_exit_epoch = spec.compute_activation_exit_epoch(epoch)?; let earliest_exit_epoch = pre_state .validators() .iter() .filter(|v| v.exit_epoch != spec.far_future_epoch) .map(|v| v.exit_epoch) .max() - .unwrap_or(epoch) + .unwrap_or(activation_exit_epoch) + .max(activation_exit_epoch) .safe_add(1)?; // The total active balance cache must be built before the consolidation churn limit diff --git a/consensus/types/presets/gnosis/electra.yaml b/consensus/types/presets/gnosis/electra.yaml index 660ed9b64cf..42afbb233ed 100644 --- a/consensus/types/presets/gnosis/electra.yaml +++ b/consensus/types/presets/gnosis/electra.yaml @@ -10,7 +10,7 @@ MAX_EFFECTIVE_BALANCE_ELECTRA: 2048000000000 # State list lengths # --------------------------------------------------------------- # `uint64(2**27)` (= 134,217,728) -PENDING_BALANCE_DEPOSITS_LIMIT: 134217728 +PENDING_DEPOSITS_LIMIT: 134217728 # `uint64(2**27)` (= 134,217,728) PENDING_PARTIAL_WITHDRAWALS_LIMIT: 134217728 # `uint64(2**18)` (= 262,144) @@ -29,12 +29,12 @@ WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA: 4096 MAX_ATTESTER_SLASHINGS_ELECTRA: 1 # `uint64(2**3)` (= 8) MAX_ATTESTATIONS_ELECTRA: 8 -# `uint64(2**0)` (= 1) -MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD: 1 +# `uint64(2**1)` (= 2) +MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD: 2 # Execution # --------------------------------------------------------------- -# 2**13 (= 8192) receipts +# 
2**13 (= 8192) deposit requests MAX_DEPOSIT_REQUESTS_PER_PAYLOAD: 8192 # 2**4 (= 16) withdrawal requests MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD: 16 @@ -43,3 +43,8 @@ MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD: 16 # --------------------------------------------------------------- # 2**3 ( = 8) pending withdrawals MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP: 8 + +# Pending deposits processing +# --------------------------------------------------------------- +# 2**4 ( = 16) pending deposits +MAX_PENDING_DEPOSITS_PER_EPOCH: 16 diff --git a/consensus/types/presets/mainnet/altair.yaml b/consensus/types/presets/mainnet/altair.yaml index 9a17b780327..813ef72122a 100644 --- a/consensus/types/presets/mainnet/altair.yaml +++ b/consensus/types/presets/mainnet/altair.yaml @@ -22,3 +22,5 @@ EPOCHS_PER_SYNC_COMMITTEE_PERIOD: 256 # --------------------------------------------------------------- # 1 MIN_SYNC_COMMITTEE_PARTICIPANTS: 1 +# SLOTS_PER_EPOCH * EPOCHS_PER_SYNC_COMMITTEE_PERIOD (= 32 * 256) +UPDATE_TIMEOUT: 8192 diff --git a/consensus/types/presets/mainnet/electra.yaml b/consensus/types/presets/mainnet/electra.yaml index 660ed9b64cf..42afbb233ed 100644 --- a/consensus/types/presets/mainnet/electra.yaml +++ b/consensus/types/presets/mainnet/electra.yaml @@ -10,7 +10,7 @@ MAX_EFFECTIVE_BALANCE_ELECTRA: 2048000000000 # State list lengths # --------------------------------------------------------------- # `uint64(2**27)` (= 134,217,728) -PENDING_BALANCE_DEPOSITS_LIMIT: 134217728 +PENDING_DEPOSITS_LIMIT: 134217728 # `uint64(2**27)` (= 134,217,728) PENDING_PARTIAL_WITHDRAWALS_LIMIT: 134217728 # `uint64(2**18)` (= 262,144) @@ -29,12 +29,12 @@ WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA: 4096 MAX_ATTESTER_SLASHINGS_ELECTRA: 1 # `uint64(2**3)` (= 8) MAX_ATTESTATIONS_ELECTRA: 8 -# `uint64(2**0)` (= 1) -MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD: 1 +# `uint64(2**1)` (= 2) +MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD: 2 # Execution # --------------------------------------------------------------- -# 2**13 (=
8192) receipts +# 2**13 (= 8192) deposit requests MAX_DEPOSIT_REQUESTS_PER_PAYLOAD: 8192 # 2**4 (= 16) withdrawal requests MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD: 16 @@ -43,3 +43,8 @@ MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD: 16 # --------------------------------------------------------------- # 2**3 ( = 8) pending withdrawals MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP: 8 + +# Pending deposits processing +# --------------------------------------------------------------- +# 2**4 ( = 16) pending deposits +MAX_PENDING_DEPOSITS_PER_EPOCH: 16 diff --git a/consensus/types/presets/mainnet/phase0.yaml b/consensus/types/presets/mainnet/phase0.yaml index 02bc96c8cdb..00133ba3690 100644 --- a/consensus/types/presets/mainnet/phase0.yaml +++ b/consensus/types/presets/mainnet/phase0.yaml @@ -85,4 +85,4 @@ MAX_ATTESTATIONS: 128 # 2**4 (= 16) MAX_DEPOSITS: 16 # 2**4 (= 16) -MAX_VOLUNTARY_EXITS: 16 +MAX_VOLUNTARY_EXITS: 16 \ No newline at end of file diff --git a/consensus/types/presets/minimal/altair.yaml b/consensus/types/presets/minimal/altair.yaml index 88d78bea365..5e472c49cf3 100644 --- a/consensus/types/presets/minimal/altair.yaml +++ b/consensus/types/presets/minimal/altair.yaml @@ -22,3 +22,5 @@ EPOCHS_PER_SYNC_COMMITTEE_PERIOD: 8 # --------------------------------------------------------------- # 1 MIN_SYNC_COMMITTEE_PARTICIPANTS: 1 +# SLOTS_PER_EPOCH * EPOCHS_PER_SYNC_COMMITTEE_PERIOD (= 8 * 8) +UPDATE_TIMEOUT: 64 diff --git a/consensus/types/presets/minimal/deneb.yaml b/consensus/types/presets/minimal/deneb.yaml index b1bbc4ee541..c101de3162d 100644 --- a/consensus/types/presets/minimal/deneb.yaml +++ b/consensus/types/presets/minimal/deneb.yaml @@ -2,9 +2,9 @@ # Misc # --------------------------------------------------------------- -# [customized] +# `uint64(4096)` FIELD_ELEMENTS_PER_BLOB: 4096 # [customized] -MAX_BLOB_COMMITMENTS_PER_BLOCK: 16 -# [customized] `floorlog2(BLOB_KZG_COMMITMENTS_GINDEX) + 1 + ceillog2(MAX_BLOB_COMMITMENTS_PER_BLOCK)` = 4 + 1 + 4 = 9
-KZG_COMMITMENT_INCLUSION_PROOF_DEPTH: 9 +MAX_BLOB_COMMITMENTS_PER_BLOCK: 32 +# [customized] `floorlog2(get_generalized_index(BeaconBlockBody, 'blob_kzg_commitments')) + 1 + ceillog2(MAX_BLOB_COMMITMENTS_PER_BLOCK)` = 4 + 1 + 5 = 10 +KZG_COMMITMENT_INCLUSION_PROOF_DEPTH: 10 diff --git a/consensus/types/presets/minimal/electra.yaml b/consensus/types/presets/minimal/electra.yaml index ef1ce494d8e..44e4769756e 100644 --- a/consensus/types/presets/minimal/electra.yaml +++ b/consensus/types/presets/minimal/electra.yaml @@ -10,7 +10,7 @@ MAX_EFFECTIVE_BALANCE_ELECTRA: 2048000000000 # State list lengths # --------------------------------------------------------------- # `uint64(2**27)` (= 134,217,728) -PENDING_BALANCE_DEPOSITS_LIMIT: 134217728 +PENDING_DEPOSITS_LIMIT: 134217728 # [customized] `uint64(2**6)` (= 64) PENDING_PARTIAL_WITHDRAWALS_LIMIT: 64 # [customized] `uint64(2**6)` (= 64) @@ -29,8 +29,8 @@ WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA: 4096 MAX_ATTESTER_SLASHINGS_ELECTRA: 1 # `uint64(2**3)` (= 8) MAX_ATTESTATIONS_ELECTRA: 8 -# `uint64(2**0)` (= 1) -MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD: 1 +# `uint64(2**1)` (= 2) +MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD: 2 # Execution # --------------------------------------------------------------- @@ -41,5 +41,10 @@ MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD: 2 # Withdrawals processing # --------------------------------------------------------------- -# 2**0 ( = 1) pending withdrawals -MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP: 1 +# 2**1 ( = 2) pending withdrawals +MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP: 2 + +# Pending deposits processing +# --------------------------------------------------------------- +# 2**4 ( = 16) pending deposits +MAX_PENDING_DEPOSITS_PER_EPOCH: 16 diff --git a/consensus/types/presets/minimal/phase0.yaml b/consensus/types/presets/minimal/phase0.yaml index 1f756031421..d9a6a2b6c0d 100644 --- a/consensus/types/presets/minimal/phase0.yaml +++ b/consensus/types/presets/minimal/phase0.yaml @@ -4,11 +4,11 @@ #
--------------------------------------------------------------- # [customized] Just 4 committees for slot for testing purposes MAX_COMMITTEES_PER_SLOT: 4 -# [customized] unsecure, but fast +# [customized] insecure, but fast TARGET_COMMITTEE_SIZE: 4 # 2**11 (= 2,048) MAX_VALIDATORS_PER_COMMITTEE: 2048 -# [customized] Faster, but unsecure. +# [customized] Faster, but insecure. SHUFFLE_ROUND_COUNT: 10 # 4 HYSTERESIS_QUOTIENT: 4 @@ -85,4 +85,4 @@ MAX_ATTESTATIONS: 128 # 2**4 (= 16) MAX_DEPOSITS: 16 # 2**4 (= 16) -MAX_VOLUNTARY_EXITS: 16 +MAX_VOLUNTARY_EXITS: 16 \ No newline at end of file diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index de6077bf940..6f44998cdff 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -46,6 +46,7 @@ mod tests; pub const CACHED_EPOCHS: usize = 3; const MAX_RANDOM_BYTE: u64 = (1 << 8) - 1; +const MAX_RANDOM_VALUE: u64 = (1 << 16) - 1; pub type Validators = List::ValidatorRegistryLimit>; pub type Balances = List::ValidatorRegistryLimit>; @@ -916,6 +917,11 @@ impl BeaconState { } let max_effective_balance = spec.max_effective_balance_for_fork(self.fork_name_unchecked()); + let max_random_value = if self.fork_name_unchecked().electra_enabled() { + MAX_RANDOM_VALUE + } else { + MAX_RANDOM_BYTE + }; let mut i = 0; loop { @@ -929,10 +935,10 @@ impl BeaconState { let candidate_index = *indices .get(shuffled_index) .ok_or(Error::ShuffleIndexOutOfBounds(shuffled_index))?; - let random_byte = Self::shuffling_random_byte(i, seed)?; + let random_value = self.shuffling_random_value(i, seed)?; let effective_balance = self.get_effective_balance(candidate_index)?; - if effective_balance.safe_mul(MAX_RANDOM_BYTE)? - >= max_effective_balance.safe_mul(u64::from(random_byte))? + if effective_balance.safe_mul(max_random_value)? + >= max_effective_balance.safe_mul(random_value)? 
{ return Ok(candidate_index); } @@ -940,6 +946,19 @@ impl BeaconState { } } + /// Fork-aware abstraction for the shuffling. + /// + /// In Electra and later, the random value is a 16-bit integer stored in a `u64`. + /// + /// Prior to Electra, the random value is an 8-bit integer stored in a `u64`. + fn shuffling_random_value(&self, i: usize, seed: &[u8]) -> Result { + if self.fork_name_unchecked().electra_enabled() { + Self::shuffling_random_u16_electra(i, seed).map(u64::from) + } else { + Self::shuffling_random_byte(i, seed).map(u64::from) + } + } + /// Get a random byte from the given `seed`. /// /// Used by the proposer & sync committee selection functions. @@ -953,6 +972,21 @@ impl BeaconState { .ok_or(Error::ShuffleIndexOutOfBounds(index)) } + /// Get two random bytes from the given `seed`. + /// + /// This is used in place of `shuffling_random_byte` from Electra onwards. + fn shuffling_random_u16_electra(i: usize, seed: &[u8]) -> Result { + let mut preimage = seed.to_vec(); + preimage.append(&mut int_to_bytes8(i.safe_div(16)? as u64)); + let offset = i.safe_rem(16)?.safe_mul(2)?; + hash(&preimage) + .get(offset..offset.safe_add(2)?) + .ok_or(Error::ShuffleIndexOutOfBounds(offset))? + .try_into() + .map(u16::from_le_bytes) + .map_err(|_| Error::ShuffleIndexOutOfBounds(offset)) + } + /// Convenience accessor for the `execution_payload_header` as an `ExecutionPayloadHeaderRef`. 
pub fn latest_execution_payload_header(&self) -> Result, Error> { match self { @@ -1120,6 +1154,11 @@ impl BeaconState { let seed = self.get_seed(epoch, Domain::SyncCommittee, spec)?; let max_effective_balance = spec.max_effective_balance_for_fork(self.fork_name_unchecked()); + let max_random_value = if self.fork_name_unchecked().electra_enabled() { + MAX_RANDOM_VALUE + } else { + MAX_RANDOM_BYTE + }; let mut i = 0; let mut sync_committee_indices = Vec::with_capacity(E::SyncCommitteeSize::to_usize()); @@ -1134,10 +1173,10 @@ impl BeaconState { let candidate_index = *active_validator_indices .get(shuffled_index) .ok_or(Error::ShuffleIndexOutOfBounds(shuffled_index))?; - let random_byte = Self::shuffling_random_byte(i, seed.as_slice())?; + let random_value = self.shuffling_random_value(i, seed.as_slice())?; let effective_balance = self.get_validator(candidate_index)?.effective_balance; - if effective_balance.safe_mul(MAX_RANDOM_BYTE)? - >= max_effective_balance.safe_mul(u64::from(random_byte))? + if effective_balance.safe_mul(max_random_value)? + >= max_effective_balance.safe_mul(random_value)? { sync_committee_indices.push(candidate_index); } @@ -2205,7 +2244,7 @@ impl BeaconState { for withdrawal in self .pending_partial_withdrawals()? 
.iter() - .filter(|withdrawal| withdrawal.index as usize == validator_index) + .filter(|withdrawal| withdrawal.validator_index as usize == validator_index) { pending_balance.safe_add_assign(withdrawal.amount)?; } diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 65f4c37aa15..ea4d8641f6c 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -191,6 +191,7 @@ pub struct ChainSpec { pub max_pending_partials_per_withdrawals_sweep: u64, pub min_per_epoch_churn_limit_electra: u64, pub max_per_epoch_activation_exit_churn_limit: u64, + pub max_blobs_per_block_electra: u64, /* * Fulu hard fork params @@ -623,9 +624,12 @@ impl ChainSpec { } /// Return the value of `MAX_BLOBS_PER_BLOCK` appropriate for `fork`. - pub fn max_blobs_per_block_by_fork(&self, _fork_name: ForkName) -> u64 { - // TODO(electra): add Electra blobs per block change here - self.max_blobs_per_block + pub fn max_blobs_per_block_by_fork(&self, fork_name: ForkName) -> u64 { + if fork_name.electra_enabled() { + self.max_blobs_per_block_electra + } else { + self.max_blobs_per_block + } } pub fn data_columns_per_subnet(&self) -> usize { @@ -826,6 +830,7 @@ impl ChainSpec { u64::checked_pow(2, 8)?.checked_mul(u64::checked_pow(10, 9)?) }) .expect("calculation does not overflow"), + max_blobs_per_block_electra: default_max_blobs_per_block_electra(), /* * Fulu hard fork params @@ -940,7 +945,7 @@ impl ChainSpec { // Electra electra_fork_version: [0x05, 0x00, 0x00, 0x01], electra_fork_epoch: None, - max_pending_partials_per_withdrawals_sweep: u64::checked_pow(2, 0) + max_pending_partials_per_withdrawals_sweep: u64::checked_pow(2, 1) .expect("pow does not overflow"), min_per_epoch_churn_limit_electra: option_wrapper(|| { u64::checked_pow(2, 6)?.checked_mul(u64::checked_pow(10, 9)?) @@ -1156,6 +1161,7 @@ impl ChainSpec { u64::checked_pow(2, 8)?.checked_mul(u64::checked_pow(10, 9)?) 
}) .expect("calculation does not overflow"), + max_blobs_per_block_electra: default_max_blobs_per_block_electra(), /* * Fulu hard fork params @@ -1412,6 +1418,9 @@ pub struct Config { #[serde(default = "default_max_per_epoch_activation_exit_churn_limit")] #[serde(with = "serde_utils::quoted_u64")] max_per_epoch_activation_exit_churn_limit: u64, + #[serde(default = "default_max_blobs_per_block_electra")] + #[serde(with = "serde_utils::quoted_u64")] + max_blobs_per_block_electra: u64, #[serde(default = "default_custody_requirement")] #[serde(with = "serde_utils::quoted_u64")] @@ -1554,6 +1563,10 @@ const fn default_max_per_epoch_activation_exit_churn_limit() -> u64 { 256_000_000_000 } +const fn default_max_blobs_per_block_electra() -> u64 { + 9 +} + const fn default_attestation_propagation_slot_range() -> u64 { 32 } @@ -1773,6 +1786,7 @@ impl Config { min_per_epoch_churn_limit_electra: spec.min_per_epoch_churn_limit_electra, max_per_epoch_activation_exit_churn_limit: spec .max_per_epoch_activation_exit_churn_limit, + max_blobs_per_block_electra: spec.max_blobs_per_block_electra, custody_requirement: spec.custody_requirement, data_column_sidecar_subnet_count: spec.data_column_sidecar_subnet_count, @@ -1850,6 +1864,7 @@ impl Config { min_per_epoch_churn_limit_electra, max_per_epoch_activation_exit_churn_limit, + max_blobs_per_block_electra, custody_requirement, data_column_sidecar_subnet_count, number_of_columns, @@ -1919,6 +1934,7 @@ impl Config { min_per_epoch_churn_limit_electra, max_per_epoch_activation_exit_churn_limit, + max_blobs_per_block_electra, // We need to re-derive any values that might have changed in the config. 
max_blocks_by_root_request: max_blocks_by_root_request_common(max_request_blocks), diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index 976766dfa9d..0bc074072f6 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -3,10 +3,10 @@ use crate::*; use safe_arith::SafeArith; use serde::{Deserialize, Serialize}; use ssz_types::typenum::{ - bit::B0, UInt, U0, U1, U1024, U1048576, U1073741824, U1099511627776, U128, U131072, U134217728, - U16, U16777216, U2, U2048, U256, U262144, U32, U4, U4096, U512, U625, U64, U65536, U8, U8192, + bit::B0, UInt, U0, U1, U10, U1024, U1048576, U1073741824, U1099511627776, U128, U131072, + U134217728, U16, U16777216, U17, U2, U2048, U256, U262144, U32, U4, U4096, U512, U625, U64, + U65536, U8, U8192, }; -use ssz_types::typenum::{U17, U9}; use std::fmt::{self, Debug}; use std::str::FromStr; @@ -431,7 +431,7 @@ impl EthSpec for MainnetEthSpec { type PendingDepositsLimit = U134217728; type PendingPartialWithdrawalsLimit = U134217728; type PendingConsolidationsLimit = U262144; - type MaxConsolidationRequestsPerPayload = U1; + type MaxConsolidationRequestsPerPayload = U2; type MaxDepositRequestsPerPayload = U8192; type MaxAttesterSlashingsElectra = U1; type MaxAttestationsElectra = U8; @@ -466,8 +466,8 @@ impl EthSpec for MinimalEthSpec { type MaxWithdrawalsPerPayload = U4; type FieldElementsPerBlob = U4096; type BytesPerBlob = U131072; - type MaxBlobCommitmentsPerBlock = U16; - type KzgCommitmentInclusionProofDepth = U9; + type MaxBlobCommitmentsPerBlock = U32; + type KzgCommitmentInclusionProofDepth = U10; type PendingPartialWithdrawalsLimit = U64; type PendingConsolidationsLimit = U64; type MaxDepositRequestsPerPayload = U4; @@ -558,7 +558,7 @@ impl EthSpec for GnosisEthSpec { type PendingDepositsLimit = U134217728; type PendingPartialWithdrawalsLimit = U134217728; type PendingConsolidationsLimit = U262144; - type MaxConsolidationRequestsPerPayload = U1; + type 
MaxConsolidationRequestsPerPayload = U2; type MaxDepositRequestsPerPayload = U8192; type MaxAttesterSlashingsElectra = U1; type MaxAttestationsElectra = U8; diff --git a/consensus/types/src/pending_partial_withdrawal.rs b/consensus/types/src/pending_partial_withdrawal.rs index e5ace7b2736..846dd973602 100644 --- a/consensus/types/src/pending_partial_withdrawal.rs +++ b/consensus/types/src/pending_partial_withdrawal.rs @@ -21,7 +21,7 @@ use tree_hash_derive::TreeHash; )] pub struct PendingPartialWithdrawal { #[serde(with = "serde_utils::quoted_u64")] - pub index: u64, + pub validator_index: u64, #[serde(with = "serde_utils::quoted_u64")] pub amount: u64, pub withdrawable_epoch: Epoch, diff --git a/consensus/types/src/preset.rs b/consensus/types/src/preset.rs index f64b7051e5f..9a9915e458a 100644 --- a/consensus/types/src/preset.rs +++ b/consensus/types/src/preset.rs @@ -234,7 +234,7 @@ pub struct ElectraPreset { #[serde(with = "serde_utils::quoted_u64")] pub max_pending_partials_per_withdrawals_sweep: u64, #[serde(with = "serde_utils::quoted_u64")] - pub pending_balance_deposits_limit: u64, + pub pending_deposits_limit: u64, #[serde(with = "serde_utils::quoted_u64")] pub pending_partial_withdrawals_limit: u64, #[serde(with = "serde_utils::quoted_u64")] @@ -260,7 +260,7 @@ impl ElectraPreset { whistleblower_reward_quotient_electra: spec.whistleblower_reward_quotient_electra, max_pending_partials_per_withdrawals_sweep: spec .max_pending_partials_per_withdrawals_sweep, - pending_balance_deposits_limit: E::pending_deposits_limit() as u64, + pending_deposits_limit: E::pending_deposits_limit() as u64, pending_partial_withdrawals_limit: E::pending_partial_withdrawals_limit() as u64, pending_consolidations_limit: E::pending_consolidations_limit() as u64, max_consolidation_requests_per_payload: E::max_consolidation_requests_per_payload() diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index d5f4997bb7e..7108e3e8f68 100644 --- a/testing/ef_tests/Makefile 
+++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v1.5.0-alpha.8 +TESTS_TAG := v1.5.0-beta.0 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index dacca204c19..bf9e5d6cfa4 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -35,6 +35,8 @@ "tests/.*/.*/ssz_static/LightClientStore", # LightClientSnapshot "tests/.*/.*/ssz_static/LightClientSnapshot", + # LightClientDataCollection + "tests/minimal/.*/light_client/data_collection", # One of the EF researchers likes to pack the tarballs on a Mac ".*\\.DS_Store.*", # More Mac weirdness. @@ -48,6 +50,10 @@ "tests/.*/eip6110", "tests/.*/whisk", "tests/.*/eip7594", + # Fulu tests are not yet being run + "tests/.*/fulu", + # TODO(electra): SingleAttestation tests are waiting on Eitan's PR + "tests/.*/electra/ssz_static/SingleAttestation" ] diff --git a/testing/ef_tests/src/cases/genesis_initialization.rs b/testing/ef_tests/src/cases/genesis_initialization.rs index 11402c75e62..210e18f781a 100644 --- a/testing/ef_tests/src/cases/genesis_initialization.rs +++ b/testing/ef_tests/src/cases/genesis_initialization.rs @@ -66,8 +66,7 @@ impl LoadCase for GenesisInitialization { impl Case for GenesisInitialization { fn is_enabled_for_fork(fork_name: ForkName) -> bool { - // Altair genesis and later requires real crypto. 
- fork_name == ForkName::Base || cfg!(not(feature = "fake_crypto")) + fork_name == ForkName::Base } fn result(&self, _case_index: usize, fork_name: ForkName) -> Result<(), Error> { diff --git a/testing/ef_tests/src/cases/genesis_validity.rs b/testing/ef_tests/src/cases/genesis_validity.rs index e977fa3d637..8fb9f2fbdcb 100644 --- a/testing/ef_tests/src/cases/genesis_validity.rs +++ b/testing/ef_tests/src/cases/genesis_validity.rs @@ -39,6 +39,10 @@ impl LoadCase for GenesisValidity { } impl Case for GenesisValidity { + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name == ForkName::Base + } + fn result(&self, _case_index: usize, fork_name: ForkName) -> Result<(), Error> { let spec = &testing_spec::(fork_name); diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index d8fe061061a..2e49b1301d4 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -350,7 +350,7 @@ where self.supported_forks.contains(&fork_name) } - fn is_enabled_for_feature(&self, feature_name: FeatureName) -> bool { + fn is_enabled_for_feature(&self, _feature_name: FeatureName) -> bool { // This ensures we only run the tests **once** for `Eip7594`, using the types matching the // correct fork, e.g. `Eip7594` uses SSZ types from `Deneb` as of spec test version // `v1.5.0-alpha.8`, therefore the `Eip7594` tests should get included when testing Deneb types. 
@@ -362,8 +362,11 @@ where // SszStaticHandler::, MainnetEthSpec>::pre_electra().run(); // SszStaticHandler::, MainnetEthSpec>::electra_only().run(); // ``` + /* TODO(das): re-enable feature_name == FeatureName::Eip7594 && self.supported_forks.contains(&feature_name.fork_name()) + */ + false } } @@ -385,8 +388,10 @@ where BeaconState::::name().into() } - fn is_enabled_for_feature(&self, feature_name: FeatureName) -> bool { - feature_name == FeatureName::Eip7594 + fn is_enabled_for_feature(&self, _feature_name: FeatureName) -> bool { + // TODO(das): re-enable + // feature_name == FeatureName::Eip7594 + false } } @@ -410,8 +415,10 @@ where T::name().into() } - fn is_enabled_for_feature(&self, feature_name: FeatureName) -> bool { - feature_name == FeatureName::Eip7594 + fn is_enabled_for_feature(&self, _feature_name: FeatureName) -> bool { + // TODO(das): re-enable + // feature_name == FeatureName::Eip7594 + false } } @@ -995,8 +1002,10 @@ impl Handler for KzgInclusionMerkleProofValidityHandler bool { - feature_name == FeatureName::Eip7594 + fn is_enabled_for_feature(&self, _feature_name: FeatureName) -> bool { + // TODO(das): re-enable this + // feature_name == FeatureName::Eip7594 + false } } diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 691d27951ad..7c268123fae 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -237,9 +237,7 @@ macro_rules! 
ssz_static_test_no_run { #[cfg(feature = "fake_crypto")] mod ssz_static { - use ef_tests::{ - FeatureName, Handler, SszStaticHandler, SszStaticTHCHandler, SszStaticWithSpecHandler, - }; + use ef_tests::{Handler, SszStaticHandler, SszStaticTHCHandler, SszStaticWithSpecHandler}; use types::historical_summary::HistoricalSummary; use types::{ AttesterSlashingBase, AttesterSlashingElectra, ConsolidationRequest, DepositRequest, @@ -624,6 +622,7 @@ mod ssz_static { SszStaticHandler::::capella_and_later().run(); } + /* FIXME(das): re-enable #[test] fn data_column_sidecar() { SszStaticHandler::, MinimalEthSpec>::deneb_only() @@ -639,6 +638,7 @@ mod ssz_static { SszStaticHandler::::deneb_only() .run_for_feature(FeatureName::Eip7594); } + */ #[test] fn consolidation() { @@ -899,6 +899,7 @@ fn kzg_verify_kzg_proof() { KZGVerifyKZGProofHandler::::default().run(); } +/* FIXME(das): re-enable these tests #[test] fn kzg_compute_cells_and_proofs() { KZGComputeCellsAndKZGProofHandler::::default() @@ -916,6 +917,7 @@ fn kzg_recover_cells_and_proofs() { KZGRecoverCellsAndKZGProofHandler::::default() .run_for_feature(FeatureName::Eip7594); } +*/ #[test] fn beacon_state_merkle_proof_validity() { @@ -947,8 +949,10 @@ fn rewards() { } } +/* FIXME(das): re-enable these tests #[test] fn get_custody_columns() { GetCustodyColumnsHandler::::default().run_for_feature(FeatureName::Eip7594); GetCustodyColumnsHandler::::default().run_for_feature(FeatureName::Eip7594); } +*/