From 8cb9b5e126f851139a1afcbe1a654f40f0fbde04 Mon Sep 17 00:00:00 2001 From: Mac L Date: Fri, 9 Dec 2022 06:39:19 +0000 Subject: [PATCH 01/23] Expose certain `validator_monitor` metrics to the HTTP API (#3760) ## Issue Addressed #3724 ## Proposed Changes Exposes certain `validator_monitor` as an endpoint on the HTTP API. Will only return metrics for validators which are actively being monitored. ### Usage ```bash curl -X GET "http://localhost:5052/lighthouse/ui/validator_metrics" -H "accept: application/json" | jq ``` ```json { "data": { "validators": { "12345": { "attestation_hits": 10, "attestation_misses": 0, "attestation_hit_percentage": 100, "attestation_head_hits": 10, "attestation_head_misses": 0, "attestation_head_hit_percentage": 100, "attestation_target_hits": 5, "attestation_target_misses": 5, "attestation_target_hit_percentage": 50 } } } } ``` ## Additional Info Based on #3756 which should be merged first. --- beacon_node/beacon_chain/src/lib.rs | 2 +- .../beacon_chain/src/validator_monitor.rs | 8 ++ beacon_node/http_api/src/lib.rs | 21 ++- beacon_node/http_api/src/ui.rs | 128 +++++++++++++++++- book/src/api-lighthouse.md | 27 ++++ 5 files changed, 181 insertions(+), 5 deletions(-) diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 3889fe4aa53..fd1c1cceb1f 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -22,7 +22,7 @@ pub mod fork_revert; mod head_tracker; pub mod historical_blocks; pub mod merge_readiness; -mod metrics; +pub mod metrics; pub mod migrate; mod naive_aggregation_pool; mod observed_aggregates; diff --git a/beacon_node/beacon_chain/src/validator_monitor.rs b/beacon_node/beacon_chain/src/validator_monitor.rs index f9203f74bf3..c99f85639cb 100644 --- a/beacon_node/beacon_chain/src/validator_monitor.rs +++ b/beacon_node/beacon_chain/src/validator_monitor.rs @@ -629,6 +629,14 @@ impl ValidatorMonitor { self.validators.len() } + // Return the `id`'s of all 
monitored validators. + pub fn get_all_monitored_validators(&self) -> Vec { + self.validators + .iter() + .map(|(_, val)| val.id.clone()) + .collect() + } + /// If `self.auto_register == true`, add the `validator_index` to `self.monitored_validators`. /// Otherwise, do nothing. pub fn auto_register_local_validator(&mut self, validator_index: u64) { diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 645c4ccfaba..b018f9c7375 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -2899,6 +2899,22 @@ pub fn serve( }) }); + // POST lighthouse/ui/validator_metrics + let post_lighthouse_ui_validator_metrics = warp::path("lighthouse") + .and(warp::path("ui")) + .and(warp::path("validator_metrics")) + .and(warp::path::end()) + .and(warp::body::json()) + .and(chain_filter.clone()) + .and_then( + |request_data: ui::ValidatorMetricsRequestData, chain: Arc>| { + blocking_json_task(move || { + ui::post_validator_monitor_metrics(request_data, chain) + .map(api_types::GenericResponse::from) + }) + }, + ); + // GET lighthouse/syncing let get_lighthouse_syncing = warp::path("lighthouse") .and(warp::path("syncing")) @@ -3349,6 +3365,7 @@ pub fn serve( .or(get_validator_sync_committee_contribution.boxed()) .or(get_lighthouse_health.boxed()) .or(get_lighthouse_ui_health.boxed()) + .or(get_lighthouse_ui_validator_count.boxed()) .or(get_lighthouse_syncing.boxed()) .or(get_lighthouse_nat.boxed()) .or(get_lighthouse_peers.boxed()) @@ -3366,7 +3383,6 @@ pub fn serve( .or(get_lighthouse_attestation_performance.boxed()) .or(get_lighthouse_block_packing_efficiency.boxed()) .or(get_lighthouse_merge_readiness.boxed()) - .or(get_lighthouse_ui_validator_count.boxed()) .or(get_events.boxed()), ) .boxed() @@ -3390,7 +3406,8 @@ pub fn serve( .or(post_lighthouse_liveness.boxed()) .or(post_lighthouse_database_reconstruct.boxed()) .or(post_lighthouse_database_historical_blocks.boxed()) - .or(post_lighthouse_block_rewards.boxed()), + 
.or(post_lighthouse_block_rewards.boxed()) + .or(post_lighthouse_ui_validator_metrics.boxed()), )) .recover(warp_utils::reject::handle_rejection) .with(slog_logging(log.clone())) diff --git a/beacon_node/http_api/src/ui.rs b/beacon_node/http_api/src/ui.rs index 8f9400dbbd0..a5b3a8b2f2e 100644 --- a/beacon_node/http_api/src/ui.rs +++ b/beacon_node/http_api/src/ui.rs @@ -1,10 +1,11 @@ -use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use beacon_chain::{metrics, BeaconChain, BeaconChainError, BeaconChainTypes}; use eth2::types::ValidatorStatus; use serde::{Deserialize, Serialize}; +use std::collections::{HashMap, HashSet}; use std::sync::Arc; use warp_utils::reject::beacon_chain_error; -#[derive(Debug, Default, PartialEq, Clone, Serialize, Deserialize)] +#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)] pub struct ValidatorCountResponse { pub active_ongoing: u64, pub active_exiting: u64, @@ -69,3 +70,126 @@ pub fn get_validator_count( exited_slashed, }) } + +#[derive(PartialEq, Serialize, Deserialize)] +pub struct ValidatorMetricsRequestData { + indices: Vec, +} + +#[derive(PartialEq, Serialize, Deserialize)] +pub struct ValidatorMetrics { + attestation_hits: u64, + attestation_misses: u64, + attestation_hit_percentage: f64, + attestation_head_hits: u64, + attestation_head_misses: u64, + attestation_head_hit_percentage: f64, + attestation_target_hits: u64, + attestation_target_misses: u64, + attestation_target_hit_percentage: f64, +} + +#[derive(PartialEq, Serialize, Deserialize)] +pub struct ValidatorMetricsResponse { + validators: HashMap, +} + +pub fn post_validator_monitor_metrics( + request_data: ValidatorMetricsRequestData, + chain: Arc>, +) -> Result { + let validator_ids = chain + .validator_monitor + .read() + .get_all_monitored_validators() + .iter() + .cloned() + .collect::>(); + + let indices = request_data + .indices + .iter() + .map(|index| index.to_string()) + .collect::>(); + + let ids = validator_ids + 
.intersection(&indices) + .collect::>(); + + let mut validators = HashMap::new(); + + for id in ids { + let attestation_hits = metrics::get_int_counter( + &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_ATTESTER_HIT, + &[id], + ) + .map(|counter| counter.get()) + .unwrap_or(0); + let attestation_misses = metrics::get_int_counter( + &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_ATTESTER_MISS, + &[id], + ) + .map(|counter| counter.get()) + .unwrap_or(0); + let attestations = attestation_hits + attestation_misses; + let attestation_hit_percentage: f64 = if attestations == 0 { + 0.0 + } else { + (100 * attestation_hits / attestations) as f64 + }; + + let attestation_head_hits = metrics::get_int_counter( + &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_HEAD_ATTESTER_HIT, + &[id], + ) + .map(|counter| counter.get()) + .unwrap_or(0); + let attestation_head_misses = metrics::get_int_counter( + &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_HEAD_ATTESTER_MISS, + &[id], + ) + .map(|counter| counter.get()) + .unwrap_or(0); + let head_attestations = attestation_head_hits + attestation_head_misses; + let attestation_head_hit_percentage: f64 = if head_attestations == 0 { + 0.0 + } else { + (100 * attestation_head_hits / head_attestations) as f64 + }; + + let attestation_target_hits = metrics::get_int_counter( + &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_TARGET_ATTESTER_HIT, + &[id], + ) + .map(|counter| counter.get()) + .unwrap_or(0); + let attestation_target_misses = metrics::get_int_counter( + &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_TARGET_ATTESTER_MISS, + &[id], + ) + .map(|counter| counter.get()) + .unwrap_or(0); + let target_attestations = attestation_target_hits + attestation_target_misses; + let attestation_target_hit_percentage: f64 = if target_attestations == 0 { + 0.0 + } else { + (100 * attestation_target_hits / target_attestations) as f64 + }; + + let metrics = ValidatorMetrics { + attestation_hits, + attestation_misses, + attestation_hit_percentage, 
+ attestation_head_hits, + attestation_head_misses, + attestation_head_hit_percentage, + attestation_target_hits, + attestation_target_misses, + attestation_target_hit_percentage, + }; + + validators.insert(id.clone(), metrics); + } + + Ok(ValidatorMetricsResponse { validators }) +} diff --git a/book/src/api-lighthouse.md b/book/src/api-lighthouse.md index 763372692ee..2b7239361a4 100644 --- a/book/src/api-lighthouse.md +++ b/book/src/api-lighthouse.md @@ -121,6 +121,33 @@ curl -X GET "http://localhost:5052/lighthouse/ui/validator_count" -H "accept: ap } ``` +### `/lighthouse/ui/validator_metrics` +Re-exposes certain metrics from the validator monitor to the HTTP API. +Will only return metrics for the validators currently being monitored and are present in the POST data. +```bash +curl -X POST "http://localhost:5052/lighthouse/ui/validator_metrics" -d '{"indices": [12345]}' -H "Content-Type: application/json" | jq +``` + +```json +{ + "data": { + "validators": { + "12345": { + "attestation_hits": 10, + "attestation_misses": 0, + "attestation_hit_percentage": 100, + "attestation_head_hits": 10, + "attestation_head_misses": 0, + "attestation_head_hit_percentage": 100, + "attestation_target_hits": 5, + "attestation_target_misses": 5, + "attestation_target_hit_percentage": 50 + } + } + } +} +``` + ### `/lighthouse/syncing` ```bash From 80dd615fff736ae87c4c3979edaa9e019cd9b70c Mon Sep 17 00:00:00 2001 From: Mac L Date: Fri, 9 Dec 2022 09:20:10 +0000 Subject: [PATCH 02/23] Update book with missing Lighthouse endpoints (#3769) ## Proposed Changes Adds docs for the following endpoints: - `/lighthouse/analysis/attestation_performance` - `/lighthouse/analysis/block_packing_efficiency` --- book/src/api-lighthouse.md | 144 +++++++++++++++++++++++++++++++++---- book/src/api.md | 2 +- 2 files changed, 132 insertions(+), 14 deletions(-) diff --git a/book/src/api-lighthouse.md b/book/src/api-lighthouse.md index 2b7239361a4..05cb0b69cf8 100644 --- a/book/src/api-lighthouse.md +++ 
b/book/src/api-lighthouse.md @@ -501,6 +501,102 @@ The endpoint will return immediately. See the beacon node logs for an indication Manually provide `SignedBeaconBlock`s to backfill the database. This is intended for use by Lighthouse developers during testing only. +### `/lighthouse/merge_readiness` + +```bash +curl -X GET "http://localhost:5052/lighthouse/merge_readiness" | jq +``` + +``` +{ + "data":{ + "type":"ready", + "config":{ + "terminal_total_difficulty":"6400" + }, + "current_difficulty":"4800" + } + } +``` + +### `/lighthouse/analysis/attestation_performance/{index}` + +Fetch information about the attestation performance of a validator index or all validators for a +range of consecutive epochs. + +Two query parameters are required: + +* `start_epoch` (inclusive): the first epoch to compute attestation performance for. +* `end_epoch` (inclusive): the final epoch to compute attestation performance for. + +Example: + +```bash +curl -X GET "http://localhost:5052/lighthouse/analysis/attestation_performance/1?start_epoch=1&end_epoch=1" | jq +``` + +```json +[ + { + "index": 1, + "epochs": { + "1": { + "active": true, + "head": true, + "target": true, + "source": true, + "delay": 1 + } + } + } +] +``` + +Instead of specifying a validator index, you can specify the entire validator set by using `global`: + +```bash +curl -X GET "http://localhost:5052/lighthouse/analysis/attestation_performance/global?start_epoch=1&end_epoch=1" | jq +``` + +```json +[ + { + "index": 0, + "epochs": { + "1": { + "active": true, + "head": true, + "target": true, + "source": true, + "delay": 1 + } + } + }, + { + "index": 1, + "epochs": { + "1": { + "active": true, + "head": true, + "target": true, + "source": true, + "delay": 1 + } + } + }, + { + .. + } +] + +``` + +Caveats: + +* For maximum efficiency the start_epoch should satisfy `(start_epoch * slots_per_epoch) % slots_per_restore_point == 1`. 
+ This is because the state _prior_ to the `start_epoch` needs to be loaded from the database, + and loading a state on a boundary is most efficient. + ### `/lighthouse/analysis/block_rewards` Fetch information about the block rewards paid to proposers for a range of consecutive blocks. @@ -513,7 +609,7 @@ Two query parameters are required: Example: ```bash -curl "http://localhost:5052/lighthouse/analysis/block_rewards?start_slot=1&end_slot=32" | jq +curl -X GET "http://localhost:5052/lighthouse/analysis/block_rewards?start_slot=1&end_slot=32" | jq ``` ```json @@ -541,21 +637,43 @@ Caveats: [block_reward_src]: https://github.com/sigp/lighthouse/tree/unstable/common/eth2/src/lighthouse/block_rewards.rs +### `/lighthouse/analysis/block_packing` -### `/lighthouse/merge_readiness` +Fetch information about the block packing efficiency of blocks for a range of consecutive +epochs. + +Two query parameters are required: + +* `start_epoch` (inclusive): the epoch of the first block to compute packing efficiency for. +* `end_epoch` (inclusive): the epoch of the last block to compute packing efficiency for. ```bash -curl -X GET "http://localhost:5052/lighthouse/merge_readiness" +curl -X GET "http://localhost:5052/lighthouse/analysis/block_packing_efficiency?start_epoch=1&end_epoch=1" | jq ``` +```json +[ + { + "slot": "33", + "block_hash": "0xb20970bb97c6c6de6b1e2b689d6381dd15b3d3518fbaee032229495f963bd5da", + "proposer_info": { + "validator_index": 855, + "graffiti": "poapZoJ7zWNfK7F3nWjEausWVBvKa6gA" + }, + "available_attestations": 3805, + "included_attestations": 1143, + "prior_skip_slots": 1 + }, + { + .. + } +] ``` -{ - "data":{ - "type":"ready", - "config":{ - "terminal_total_difficulty":"6400" - }, - "current_difficulty":"4800" - } - } -``` + +Caveats: + +* `start_epoch` must not be `0`. +* For maximum efficiency the `start_epoch` should satisfy `(start_epoch * slots_per_epoch) % slots_per_restore_point == 1`. 
+ This is because the state _prior_ to the `start_epoch` needs to be loaded from the database, and + loading a state on a boundary is most efficient. + diff --git a/book/src/api.md b/book/src/api.md index f8c54ad9a91..5837ad9654a 100644 --- a/book/src/api.md +++ b/book/src/api.md @@ -6,4 +6,4 @@ RESTful HTTP/JSON APIs. There are two APIs served by Lighthouse: - [Beacon Node API](./api-bn.md) -- [Validator Client API](./api-vc.md) (not yet released). +- [Validator Client API](./api-vc.md) From 979b73c9b62832b0be42f917bdd15378df00562c Mon Sep 17 00:00:00 2001 From: Mac L Date: Fri, 9 Dec 2022 09:20:13 +0000 Subject: [PATCH 03/23] Add API endpoint to get VC graffiti (#3779) ## Issue Addressed #3766 ## Proposed Changes Adds an endpoint to get the graffiti that will be used for the next block proposal for each validator. ## Usage ```bash curl -H "Authorization: Bearer api-token" http://localhost:9095/lighthouse/ui/graffiti | jq ``` ```json { "data": { "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e": "mr f was here", "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b": "mr v was here", "0x872c61b4a7f8510ec809e5b023f5fdda2105d024c470ddbbeca4bc74e8280af0d178d749853e8f6a841083ac1b4db98f": null } } ``` ## Additional Info This will only return graffiti that the validator client knows about. That is from these 3 sources: 1. Graffiti File 2. validator_definitions.yml 3. The `--graffiti` flag on the VC If the graffiti is set on the BN, it will not be returned. This may warrant an additional endpoint on the BN side which can be used in the event the endpoint returns `null`. 
--- book/src/api-vc-endpoints.md | 25 ++++++++++ validator_client/src/block_service.rs | 20 +++----- validator_client/src/http_api/mod.rs | 50 ++++++++++++++++++- validator_client/src/http_api/tests.rs | 2 + .../src/initialized_validators.rs | 9 ++++ validator_client/src/lib.rs | 28 ++++++++++- 6 files changed, 118 insertions(+), 16 deletions(-) diff --git a/book/src/api-vc-endpoints.md b/book/src/api-vc-endpoints.md index 76cffc0e4f5..80a14ae7710 100644 --- a/book/src/api-vc-endpoints.md +++ b/book/src/api-vc-endpoints.md @@ -117,6 +117,31 @@ Returns information regarding the health of the host machine. } ``` +## `GET /lighthouse/ui/graffiti` + +Returns the graffiti that will be used for the next block proposal of each validator. + +### HTTP Specification + +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/lighthouse/ui/graffiti` | +| Method | GET | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 200 | + +### Example Response Body + +```json +{ + "data": { + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e": "mr f was here", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b": "mr v was here", + "0x872c61b4a7f8510ec809e5b023f5fdda2105d024c470ddbbeca4bc74e8280af0d178d749853e8f6a841083ac1b4db98f": null + } +} +``` + ## `GET /lighthouse/spec` Returns the Ethereum proof-of-stake consensus specification loaded for this validator. 
diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index b0b69a4f50d..f0d2c9081f0 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -1,6 +1,7 @@ use crate::beacon_node_fallback::{Error as FallbackError, Errors}; use crate::{ beacon_node_fallback::{BeaconNodeFallback, RequireSynced}, + determine_graffiti, graffiti_file::GraffitiFile, OfflineOnFailure, }; @@ -298,18 +299,13 @@ impl BlockService { })? .into(); - let graffiti = self - .graffiti_file - .clone() - .and_then(|mut g| match g.load_graffiti(&validator_pubkey) { - Ok(g) => g, - Err(e) => { - warn!(log, "Failed to read graffiti file"; "error" => ?e); - None - } - }) - .or_else(|| self.validator_store.graffiti(&validator_pubkey)) - .or(self.graffiti); + let graffiti = determine_graffiti( + &validator_pubkey, + log, + self.graffiti_file.clone(), + self.validator_store.graffiti(&validator_pubkey), + self.graffiti, + ); let randao_reveal_ref = &randao_reveal; let self_ref = &self; diff --git a/validator_client/src/http_api/mod.rs b/validator_client/src/http_api/mod.rs index df5d0c606e9..600e7a4c683 100644 --- a/validator_client/src/http_api/mod.rs +++ b/validator_client/src/http_api/mod.rs @@ -4,7 +4,7 @@ mod keystores; mod remotekeys; mod tests; -use crate::ValidatorStore; +use crate::{determine_graffiti, GraffitiFile, ValidatorStore}; use account_utils::{ mnemonic_from_phrase, validator_definitions::{SigningDefinition, ValidatorDefinition, Web3SignerDefinition}, @@ -13,13 +13,14 @@ pub use api_secret::ApiSecret; use create_validator::{create_validators_mnemonic, create_validators_web3signer}; use eth2::lighthouse_vc::{ std_types::{AuthResponse, GetFeeRecipientResponse, GetGasLimitResponse}, - types::{self as api_types, GenericResponse, PublicKey, PublicKeyBytes}, + types::{self as api_types, GenericResponse, Graffiti, PublicKey, PublicKeyBytes}, }; use lighthouse_version::version_with_platform; use parking_lot::RwLock; use 
serde::{Deserialize, Serialize}; use slog::{crit, info, warn, Logger}; use slot_clock::SlotClock; +use std::collections::HashMap; use std::future::Future; use std::marker::PhantomData; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; @@ -65,6 +66,8 @@ pub struct Context { pub api_secret: ApiSecret, pub validator_store: Option>>, pub validator_dir: Option, + pub graffiti_file: Option, + pub graffiti_flag: Option, pub spec: ChainSpec, pub config: Config, pub log: Logger, @@ -177,6 +180,12 @@ pub fn serve( }) }); + let inner_graffiti_file = ctx.graffiti_file.clone(); + let graffiti_file_filter = warp::any().map(move || inner_graffiti_file.clone()); + + let inner_graffiti_flag = ctx.graffiti_flag; + let graffiti_flag_filter = warp::any().map(move || inner_graffiti_flag); + let inner_ctx = ctx.clone(); let log_filter = warp::any().map(move || inner_ctx.log.clone()); @@ -329,6 +338,42 @@ pub fn serve( }) }); + let get_lighthouse_ui_graffiti = warp::path("lighthouse") + .and(warp::path("ui")) + .and(warp::path("graffiti")) + .and(warp::path::end()) + .and(validator_store_filter.clone()) + .and(graffiti_file_filter) + .and(graffiti_flag_filter) + .and(signer.clone()) + .and(log_filter.clone()) + .and_then( + |validator_store: Arc>, + graffiti_file: Option, + graffiti_flag: Option, + signer, + log| { + blocking_signed_json_task(signer, move || { + let mut result = HashMap::new(); + for (key, graffiti_definition) in validator_store + .initialized_validators() + .read() + .get_all_validators_graffiti() + { + let graffiti = determine_graffiti( + key, + &log, + graffiti_file.clone(), + graffiti_definition, + graffiti_flag, + ); + result.insert(key.to_string(), graffiti.map(|g| g.as_utf8_lossy())); + } + Ok(api_types::GenericResponse::from(result)) + }) + }, + ); + // POST lighthouse/validators/ let post_validators = warp::path("lighthouse") .and(warp::path("validators")) @@ -945,6 +990,7 @@ pub fn serve( .or(get_lighthouse_validators) .or(get_lighthouse_validators_pubkey) 
.or(get_lighthouse_ui_health) + .or(get_lighthouse_ui_graffiti) .or(get_fee_recipient) .or(get_gas_limit) .or(get_std_keystores) diff --git a/validator_client/src/http_api/tests.rs b/validator_client/src/http_api/tests.rs index b121dda5b1a..5aa24a2b022 100644 --- a/validator_client/src/http_api/tests.rs +++ b/validator_client/src/http_api/tests.rs @@ -120,6 +120,8 @@ impl ApiTester { api_secret, validator_dir: Some(validator_dir.path().into()), validator_store: Some(validator_store.clone()), + graffiti_file: None, + graffiti_flag: Some(Graffiti::default()), spec: E::default_spec(), config: HttpConfig { enabled: true, diff --git a/validator_client/src/initialized_validators.rs b/validator_client/src/initialized_validators.rs index 8d9fbe281fc..e8fe6ff2ff9 100644 --- a/validator_client/src/initialized_validators.rs +++ b/validator_client/src/initialized_validators.rs @@ -634,6 +634,15 @@ impl InitializedValidators { self.validators.get(public_key).and_then(|v| v.graffiti) } + /// Returns a `HashMap` of `public_key` -> `graffiti` for all initialized validators. + pub fn get_all_validators_graffiti(&self) -> HashMap<&PublicKeyBytes, Option> { + let mut result = HashMap::new(); + for public_key in self.validators.keys() { + result.insert(public_key, self.graffiti(public_key)); + } + result + } + /// Returns the `suggested_fee_recipient` for a given public key specified in the /// `ValidatorDefinitions`. pub fn suggested_fee_recipient(&self, public_key: &PublicKeyBytes) -> Option
{ diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 1f869562d19..819efec93c9 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -30,13 +30,14 @@ use crate::beacon_node_fallback::{ RequireSynced, }; use crate::doppelganger_service::DoppelgangerService; +use crate::graffiti_file::GraffitiFile; use account_utils::validator_definitions::ValidatorDefinitions; use attestation_service::{AttestationService, AttestationServiceBuilder}; use block_service::{BlockService, BlockServiceBuilder}; use clap::ArgMatches; use duties_service::DutiesService; use environment::RuntimeContext; -use eth2::{reqwest::ClientBuilder, BeaconNodeHttpClient, StatusCode, Timeouts}; +use eth2::{reqwest::ClientBuilder, types::Graffiti, BeaconNodeHttpClient, StatusCode, Timeouts}; use http_api::ApiSecret; use notifier::spawn_notifier; use parking_lot::RwLock; @@ -57,7 +58,7 @@ use tokio::{ sync::mpsc, time::{sleep, Duration}, }; -use types::{EthSpec, Hash256}; +use types::{EthSpec, Hash256, PublicKeyBytes}; use validator_store::ValidatorStore; /// The interval between attempts to contact the beacon node during startup. 
@@ -526,6 +527,8 @@ impl ProductionValidatorClient { api_secret, validator_store: Some(self.validator_store.clone()), validator_dir: Some(self.config.validator_dir.clone()), + graffiti_file: self.config.graffiti_file.clone(), + graffiti_flag: self.config.graffiti, spec: self.context.eth2_config.spec.clone(), config: self.config.http_api.clone(), log: log.clone(), @@ -726,3 +729,24 @@ pub fn load_pem_certificate>(pem_path: P) -> Result, + validator_definition_graffiti: Option, + graffiti_flag: Option, +) -> Option { + graffiti_file + .and_then(|mut g| match g.load_graffiti(validator_pubkey) { + Ok(g) => g, + Err(e) => { + warn!(log, "Failed to read graffiti file"; "error" => ?e); + None + } + }) + .or(validator_definition_graffiti) + .or(graffiti_flag) +} From c973bfc90c048def023b6203b42bfd79f9e9961c Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 10 Dec 2022 00:45:18 +0000 Subject: [PATCH 04/23] Reduce log severity for late and unrevealed blocks (#3775) ## Issue Addressed NA ## Proposed Changes In #3725 I introduced a `CRIT` log for unrevealed payloads, against @michaelsproul's [advice](https://github.com/sigp/lighthouse/pull/3725#discussion_r1034142113). After being woken up in the middle of the night by a block that was not revealed to the BN but *was* revealed to the network, I have capitulated. This PR implements @michaelsproul's suggestion and reduces the severity to `ERRO`. Additionally, I have dropped a `CRIT` to an `ERRO` for when a block is published late. The block in question was indeed published late on the network, however now that we have builders that can slow down block production I don't think the error is "actionable" enough to warrant a `CRIT` for the user. 
## Additional Info NA --- beacon_node/execution_layer/src/lib.rs | 2 +- beacon_node/http_api/src/publish_blocks.rs | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 2a2225cbdfd..dfce9745774 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -1556,7 +1556,7 @@ impl ExecutionLayer { &metrics::EXECUTION_LAYER_BUILDER_REVEAL_PAYLOAD_OUTCOME, &[metrics::FAILURE], ); - crit!( + error!( self.log(), "Builder failed to reveal payload"; "info" => "this relay failure may cause a missed proposal", diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index 08355c1d376..5d27f117b02 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -5,7 +5,7 @@ use beacon_chain::{ }; use lighthouse_network::PubsubMessage; use network::NetworkMessage; -use slog::{crit, error, info, warn, Logger}; +use slog::{error, info, warn, Logger}; use slot_clock::SlotClock; use std::sync::Arc; use tokio::sync::mpsc::UnboundedSender; @@ -72,10 +72,10 @@ pub async fn publish_block( // // Check to see the thresholds are non-zero to avoid logging errors with small // slot times (e.g., during testing) - let crit_threshold = chain.slot_clock.unagg_attestation_production_delay(); - let error_threshold = crit_threshold / 2; - if delay >= crit_threshold { - crit!( + let too_late_threshold = chain.slot_clock.unagg_attestation_production_delay(); + let delayed_threshold = too_late_threshold / 2; + if delay >= too_late_threshold { + error!( log, "Block was broadcast too late"; "msg" => "system may be overloaded, block likely to be orphaned", @@ -83,7 +83,7 @@ pub async fn publish_block( "slot" => block.slot(), "root" => ?root, ) - } else if delay >= error_threshold { + } else if delay >= delayed_threshold { error!( log, "Block broadcast was delayed"; From 
1b28ef8a8d89d3867aaa86aebb36046fe29b2a85 Mon Sep 17 00:00:00 2001 From: GeemoCandama Date: Tue, 13 Dec 2022 06:24:51 +0000 Subject: [PATCH 05/23] Adding light_client gossip topics (#3693) ## Issue Addressed Implementing the light_client_gossip topics but I'm not there yet. Which issue # does this PR address? Partially #3651 ## Proposed Changes Add light client gossip topics. Please list or describe the changes introduced by this PR. I'm going to Implement light_client_finality_update and light_client_optimistic_update gossip topics. Currently I've attempted the former and I'm seeking feedback. ## Additional Info I've only implemented the light_client_finality_update topic because I wanted to make sure I was on the correct path. Also checking that the gossiped LightClientFinalityUpdate is the same as the locally constructed one is not implemented because caching the updates will make this much easier. Could someone give me some feedback on this please? Please provide any additional information. For example, future considerations or information useful for reviewers. 
Co-authored-by: GeemoCandama <104614073+GeemoCandama@users.noreply.github.com> --- beacon_node/beacon_chain/src/beacon_chain.rs | 44 ++++++ beacon_node/beacon_chain/src/builder.rs | 2 + beacon_node/beacon_chain/src/lib.rs | 2 + ...ght_client_finality_update_verification.rs | 135 +++++++++++++++++ ...t_client_optimistic_update_verification.rs | 125 ++++++++++++++++ beacon_node/beacon_chain/src/metrics.rs | 18 +++ .../src/service/gossip_cache.rs | 26 ++++ .../lighthouse_network/src/service/utils.rs | 2 + .../lighthouse_network/src/types/mod.rs | 5 +- .../lighthouse_network/src/types/pubsub.rs | 40 ++++- .../lighthouse_network/src/types/topics.rs | 15 ++ .../network/src/beacon_processor/mod.rs | 110 +++++++++++++- .../beacon_processor/worker/gossip_methods.rs | 141 +++++++++++++++++- beacon_node/network/src/metrics.rs | 23 +++ beacon_node/network/src/router/mod.rs | 24 +++ beacon_node/network/src/router/processor.rs | 33 +++- beacon_node/network/src/service.rs | 23 +++ consensus/types/src/lib.rs | 3 + .../types/src/light_client_finality_update.rs | 37 ++--- .../src/light_client_optimistic_update.rs | 10 +- 20 files changed, 778 insertions(+), 40 deletions(-) create mode 100644 beacon_node/beacon_chain/src/light_client_finality_update_verification.rs create mode 100644 beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 32ae742d86f..309f6a83e07 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -22,6 +22,12 @@ use crate::execution_payload::{get_execution_payload, NotifyExecutionLayer, Prep use crate::fork_choice_signal::{ForkChoiceSignalRx, ForkChoiceSignalTx, ForkChoiceWaitResult}; use crate::head_tracker::HeadTracker; use crate::historical_blocks::HistoricalBlockError; +use crate::light_client_finality_update_verification::{ + Error as LightClientFinalityUpdateError, 
VerifiedLightClientFinalityUpdate, +}; +use crate::light_client_optimistic_update_verification::{ + Error as LightClientOptimisticUpdateError, VerifiedLightClientOptimisticUpdate, +}; use crate::migrate::BackgroundMigrator; use crate::naive_aggregation_pool::{ AggregatedAttestationMap, Error as NaiveAggregationError, NaiveAggregationPool, @@ -335,6 +341,10 @@ pub struct BeaconChain { /// Maintains a record of which validators we've seen attester slashings for. pub(crate) observed_attester_slashings: Mutex, T::EthSpec>>, + /// The most recently validated light client finality update received on gossip. + pub latest_seen_finality_update: Mutex>>, + /// The most recently validated light client optimistic update received on gossip. + pub latest_seen_optimistic_update: Mutex>>, /// Provides information from the Ethereum 1 (PoW) chain. pub eth1_chain: Option>, /// Interfaces with the execution client. @@ -1779,6 +1789,40 @@ impl BeaconChain { }) } + /// Accepts some 'LightClientFinalityUpdate' from the network and attempts to verify it + pub fn verify_finality_update_for_gossip( + self: &Arc, + light_client_finality_update: LightClientFinalityUpdate, + seen_timestamp: Duration, + ) -> Result, LightClientFinalityUpdateError> { + VerifiedLightClientFinalityUpdate::verify( + light_client_finality_update, + self, + seen_timestamp, + ) + .map(|v| { + metrics::inc_counter(&metrics::FINALITY_UPDATE_PROCESSING_SUCCESSES); + v + }) + } + + /// Accepts some 'LightClientOptimisticUpdate' from the network and attempts to verify it + pub fn verify_optimistic_update_for_gossip( + self: &Arc, + light_client_optimistic_update: LightClientOptimisticUpdate, + seen_timestamp: Duration, + ) -> Result, LightClientOptimisticUpdateError> { + VerifiedLightClientOptimisticUpdate::verify( + light_client_optimistic_update, + self, + seen_timestamp, + ) + .map(|v| { + metrics::inc_counter(&metrics::OPTIMISTIC_UPDATE_PROCESSING_SUCCESSES); + v + }) + } + /// Accepts some attestation-type object and 
attempts to verify it in the context of fork /// choice. If it is valid it is applied to `self.fork_choice`. /// diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 58bbb2b5c6a..f5bd85dec28 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -780,6 +780,8 @@ where observed_voluntary_exits: <_>::default(), observed_proposer_slashings: <_>::default(), observed_attester_slashings: <_>::default(), + latest_seen_finality_update: <_>::default(), + latest_seen_optimistic_update: <_>::default(), eth1_chain: self.eth1_chain, execution_layer: self.execution_layer, genesis_validators_root, diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index fd1c1cceb1f..a55532ac12f 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -21,6 +21,8 @@ pub mod fork_choice_signal; pub mod fork_revert; mod head_tracker; pub mod historical_blocks; +pub mod light_client_finality_update_verification; +pub mod light_client_optimistic_update_verification; pub mod merge_readiness; pub mod metrics; pub mod migrate; diff --git a/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs b/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs new file mode 100644 index 00000000000..7c431ebccca --- /dev/null +++ b/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs @@ -0,0 +1,135 @@ +use crate::{ + beacon_chain::MAXIMUM_GOSSIP_CLOCK_DISPARITY, BeaconChain, BeaconChainError, BeaconChainTypes, +}; +use derivative::Derivative; +use slot_clock::SlotClock; +use std::time::Duration; +use strum::AsRefStr; +use types::{ + light_client_update::Error as LightClientUpdateError, LightClientFinalityUpdate, Slot, +}; + +/// Returned when a light client finality update was not successfully verified. 
It might not have been verified for +/// two reasons: +/// +/// - The light client finality message is malformed or inappropriate for the context (indicated by all variants +/// other than `BeaconChainError`). +/// - The application encountered an internal error whilst attempting to determine validity +/// (the `BeaconChainError` variant) +#[derive(Debug, AsRefStr)] +pub enum Error { + /// Light client finality update message with a lower or equal finalized_header slot already forwarded. + FinalityUpdateAlreadySeen, + /// The light client finality message was received prior to one-third of slot duration passage. (with + /// respect to the gossip clock disparity and slot clock duration). + /// + /// ## Peer scoring + /// + /// Assuming the local clock is correct, the peer has sent an invalid message. + TooEarly, + /// Light client finality update message does not match the locally constructed one. + /// + /// ## Peer Scoring + /// + InvalidLightClientFinalityUpdate, + /// Signature slot start time is none. + SigSlotStartIsNone, + /// Failed to construct a LightClientFinalityUpdate from state. + FailedConstructingUpdate, + /// Beacon chain error occurred. + BeaconChainError(BeaconChainError), + LightClientUpdateError(LightClientUpdateError), +} + +impl From for Error { + fn from(e: BeaconChainError) -> Self { + Error::BeaconChainError(e) + } +} + +impl From for Error { + fn from(e: LightClientUpdateError) -> Self { + Error::LightClientUpdateError(e) + } +} + +/// Wraps a `LightClientFinalityUpdate` that has been verified for propagation on the gossip network. +#[derive(Derivative)] +#[derivative(Clone(bound = "T: BeaconChainTypes"))] +pub struct VerifiedLightClientFinalityUpdate { + light_client_finality_update: LightClientFinalityUpdate, + seen_timestamp: Duration, +} + +impl VerifiedLightClientFinalityUpdate { + /// Returns `Ok(Self)` if the `light_client_finality_update` is valid to be (re)published on the gossip + /// network.
+ pub fn verify( + light_client_finality_update: LightClientFinalityUpdate, + chain: &BeaconChain, + seen_timestamp: Duration, + ) -> Result { + let gossiped_finality_slot = light_client_finality_update.finalized_header.slot; + let one_third_slot_duration = Duration::new(chain.spec.seconds_per_slot / 3, 0); + let signature_slot = light_client_finality_update.signature_slot; + let start_time = chain.slot_clock.start_of(signature_slot); + let mut latest_seen_finality_update = chain.latest_seen_finality_update.lock(); + + let head = chain.canonical_head.cached_head(); + let head_block = &head.snapshot.beacon_block; + let attested_block_root = head_block.message().parent_root(); + let attested_block = chain + .get_blinded_block(&attested_block_root)? + .ok_or(Error::FailedConstructingUpdate)?; + let mut attested_state = chain + .get_state(&attested_block.state_root(), Some(attested_block.slot()))? + .ok_or(Error::FailedConstructingUpdate)?; + + let finalized_block_root = attested_state.finalized_checkpoint().root; + let finalized_block = chain + .get_blinded_block(&finalized_block_root)? 
+ .ok_or(Error::FailedConstructingUpdate)?; + let latest_seen_finality_update_slot = match latest_seen_finality_update.as_ref() { + Some(update) => update.finalized_header.slot, + None => Slot::new(0), + }; + + // verify that no other finality_update with a lower or equal + // finalized_header.slot was already forwarded on the network + if gossiped_finality_slot <= latest_seen_finality_update_slot { + return Err(Error::FinalityUpdateAlreadySeen); + } + + // verify that enough time has passed for the block to have been propagated + match start_time { + Some(time) => { + if seen_timestamp + MAXIMUM_GOSSIP_CLOCK_DISPARITY < time + one_third_slot_duration + { + return Err(Error::TooEarly); + } + } + None => return Err(Error::SigSlotStartIsNone), + } + + let head_state = &head.snapshot.beacon_state; + let finality_update = LightClientFinalityUpdate::new( + &chain.spec, + head_state, + head_block, + &mut attested_state, + &finalized_block, + )?; + + // verify that the gossiped finality update is the same as the locally constructed one. 
+ if finality_update != light_client_finality_update { + return Err(Error::InvalidLightClientFinalityUpdate); + } + + *latest_seen_finality_update = Some(light_client_finality_update.clone()); + + Ok(Self { + light_client_finality_update, + seen_timestamp, + }) + } +} diff --git a/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs b/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs new file mode 100644 index 00000000000..ec9c90e7355 --- /dev/null +++ b/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs @@ -0,0 +1,125 @@ +use crate::{ + beacon_chain::MAXIMUM_GOSSIP_CLOCK_DISPARITY, BeaconChain, BeaconChainError, BeaconChainTypes, +}; +use derivative::Derivative; +use slot_clock::SlotClock; +use std::time::Duration; +use strum::AsRefStr; +use types::{ + light_client_update::Error as LightClientUpdateError, LightClientOptimisticUpdate, Slot, +}; + +/// Returned when a light client optimistic update was not successfully verified. It might not have been verified for +/// two reasons: +/// +/// - The light client optimistic message is malformed or inappropriate for the context (indicated by all variants +/// other than `BeaconChainError`). +/// - The application encountered an internal error whilst attempting to determine validity +/// (the `BeaconChainError` variant) +#[derive(Debug, AsRefStr)] +pub enum Error { + /// Light client optimistic update message with a lower or equal optimistic_header slot already forwarded. + OptimisticUpdateAlreadySeen, + /// The light client optimistic message was received is prior to one-third of slot duration passage. (with + /// respect to the gossip clock disparity and slot clock duration). + /// + /// ## Peer scoring + /// + /// Assuming the local clock is correct, the peer has sent an invalid message. + TooEarly, + /// Light client optimistic update message does not match the locally constructed one. 
+ /// + /// ## Peer Scoring + /// + InvalidLightClientOptimisticUpdate, + /// Signature slot start time is none. + SigSlotStartIsNone, + /// Failed to construct a LightClientOptimisticUpdate from state. + FailedConstructingUpdate, + /// Beacon chain error occurred. + BeaconChainError(BeaconChainError), + LightClientUpdateError(LightClientUpdateError), +} + +impl From for Error { + fn from(e: BeaconChainError) -> Self { + Error::BeaconChainError(e) + } +} + +impl From for Error { + fn from(e: LightClientUpdateError) -> Self { + Error::LightClientUpdateError(e) + } +} + +/// Wraps a `LightClientOptimisticUpdate` that has been verified for propagation on the gossip network. +#[derive(Derivative)] +#[derivative(Clone(bound = "T: BeaconChainTypes"))] +pub struct VerifiedLightClientOptimisticUpdate { + light_client_optimistic_update: LightClientOptimisticUpdate, + seen_timestamp: Duration, +} + +impl VerifiedLightClientOptimisticUpdate { + /// Returns `Ok(Self)` if the `light_client_optimistic_update` is valid to be (re)published on the gossip + /// network. + pub fn verify( + light_client_optimistic_update: LightClientOptimisticUpdate, + chain: &BeaconChain, + seen_timestamp: Duration, + ) -> Result { + let gossiped_optimistic_slot = light_client_optimistic_update.attested_header.slot; + let one_third_slot_duration = Duration::new(chain.spec.seconds_per_slot / 3, 0); + let signature_slot = light_client_optimistic_update.signature_slot; + let start_time = chain.slot_clock.start_of(signature_slot); + let mut latest_seen_optimistic_update = chain.latest_seen_optimistic_update.lock(); + + let head = chain.canonical_head.cached_head(); + let head_block = &head.snapshot.beacon_block; + let attested_block_root = head_block.message().parent_root(); + let attested_block = chain + .get_blinded_block(&attested_block_root)? + .ok_or(Error::FailedConstructingUpdate)?; + + let attested_state = chain + .get_state(&attested_block.state_root(), Some(attested_block.slot()))?
+ .ok_or(Error::FailedConstructingUpdate)?; + let latest_seen_optimistic_update_slot = match latest_seen_optimistic_update.as_ref() { + Some(update) => update.attested_header.slot, + None => Slot::new(0), + }; + + // verify that no other optimistic_update with a lower or equal + // optimistic_header.slot was already forwarded on the network + if gossiped_optimistic_slot <= latest_seen_optimistic_update_slot { + return Err(Error::OptimisticUpdateAlreadySeen); + } + + // verify that enough time has passed for the block to have been propagated + match start_time { + Some(time) => { + if seen_timestamp + MAXIMUM_GOSSIP_CLOCK_DISPARITY < time + one_third_slot_duration + { + return Err(Error::TooEarly); + } + } + None => return Err(Error::SigSlotStartIsNone), + } + + let optimistic_update = + LightClientOptimisticUpdate::new(&chain.spec, head_block, &attested_state)?; + + // verify that the gossiped optimistic update is the same as the locally constructed one. + if optimistic_update != light_client_optimistic_update { + return Err(Error::InvalidLightClientOptimisticUpdate); + } + + *latest_seen_optimistic_update = Some(light_client_optimistic_update.clone()); + + Ok(Self { + light_client_optimistic_update, + seen_timestamp, + }) + } +} diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index b37c5afc35f..c681570b110 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -948,6 +948,24 @@ lazy_static! { ); } +// Fifth lazy-static block is used to account for macro recursion limit. +lazy_static! 
{ + /* + * Light server message verification + */ + pub static ref FINALITY_UPDATE_PROCESSING_SUCCESSES: Result = try_create_int_counter( + "light_client_finality_update_verification_success_total", + "Number of light client finality updates verified for gossip" + ); + /* + * Light server message verification + */ + pub static ref OPTIMISTIC_UPDATE_PROCESSING_SUCCESSES: Result = try_create_int_counter( + "light_client_optimistic_update_verification_success_total", + "Number of light client optimistic updates verified for gossip" + ); +} + /// Scrape the `beacon_chain` for metrics that are not constantly updated (e.g., the present slot, /// head state info, etc) and update the Prometheus `DEFAULT_REGISTRY`. pub fn scrape_for_metrics(beacon_chain: &BeaconChain) { diff --git a/beacon_node/lighthouse_network/src/service/gossip_cache.rs b/beacon_node/lighthouse_network/src/service/gossip_cache.rs index 4842605f7aa..c784191cd30 100644 --- a/beacon_node/lighthouse_network/src/service/gossip_cache.rs +++ b/beacon_node/lighthouse_network/src/service/gossip_cache.rs @@ -34,6 +34,10 @@ pub struct GossipCache { signed_contribution_and_proof: Option, /// Timeout for sync committee messages. sync_committee_message: Option, + /// Timeout for light client finality updates. + light_client_finality_update: Option, + /// Timeout for light client optimistic updates. + light_client_optimistic_update: Option, } #[derive(Default)] @@ -55,6 +59,10 @@ pub struct GossipCacheBuilder { signed_contribution_and_proof: Option, /// Timeout for sync committee messages. sync_committee_message: Option, + /// Timeout for light client finality updates. + light_client_finality_update: Option, + /// Timeout for light client optimistic updates. + light_client_optimistic_update: Option, } #[allow(dead_code)] @@ -113,6 +121,18 @@ impl GossipCacheBuilder { self } + /// Timeout for light client finality update messages. 
+ pub fn light_client_finality_update_timeout(mut self, timeout: Duration) -> Self { + self.light_client_finality_update = Some(timeout); + self + } + + /// Timeout for light client optimistic update messages. + pub fn light_client_optimistic_update_timeout(mut self, timeout: Duration) -> Self { + self.light_client_optimistic_update = Some(timeout); + self + } + pub fn build(self) -> GossipCache { let GossipCacheBuilder { default_timeout, @@ -124,6 +144,8 @@ impl GossipCacheBuilder { attester_slashing, signed_contribution_and_proof, sync_committee_message, + light_client_finality_update, + light_client_optimistic_update, } = self; GossipCache { expirations: DelayQueue::default(), @@ -136,6 +158,8 @@ impl GossipCacheBuilder { attester_slashing: attester_slashing.or(default_timeout), signed_contribution_and_proof: signed_contribution_and_proof.or(default_timeout), sync_committee_message: sync_committee_message.or(default_timeout), + light_client_finality_update: light_client_finality_update.or(default_timeout), + light_client_optimistic_update: light_client_optimistic_update.or(default_timeout), } } } @@ -158,6 +182,8 @@ impl GossipCache { GossipKind::AttesterSlashing => self.attester_slashing, GossipKind::SignedContributionAndProof => self.signed_contribution_and_proof, GossipKind::SyncCommitteeMessage(_) => self.sync_committee_message, + GossipKind::LightClientFinalityUpdate => self.light_client_finality_update, + GossipKind::LightClientOptimisticUpdate => self.light_client_optimistic_update, }; let expire_timeout = match expire_timeout { Some(expire_timeout) => expire_timeout, diff --git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs index 8073ae77683..09a8d1a8636 100644 --- a/beacon_node/lighthouse_network/src/service/utils.rs +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -253,6 +253,8 @@ pub(crate) fn create_whitelist_filter( add(ProposerSlashing); add(AttesterSlashing); 
add(SignedContributionAndProof); + add(LightClientFinalityUpdate); + add(LightClientOptimisticUpdate); for id in 0..attestation_subnet_count { add(Attestation(SubnetId::new(id))); } diff --git a/beacon_node/lighthouse_network/src/types/mod.rs b/beacon_node/lighthouse_network/src/types/mod.rs index ad02e07fb70..2a5ca6c8062 100644 --- a/beacon_node/lighthouse_network/src/types/mod.rs +++ b/beacon_node/lighthouse_network/src/types/mod.rs @@ -16,4 +16,7 @@ pub use globals::NetworkGlobals; pub use pubsub::{PubsubMessage, SnappyTransform}; pub use subnet::{Subnet, SubnetDiscovery}; pub use sync_state::{BackFillState, SyncState}; -pub use topics::{subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, CORE_TOPICS}; +pub use topics::{ + subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, CORE_TOPICS, + LIGHT_CLIENT_GOSSIP_TOPICS, +}; diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index a01072f8e4e..b036e558c99 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -9,10 +9,10 @@ use std::boxed::Box; use std::io::{Error, ErrorKind}; use std::sync::Arc; use types::{ - Attestation, AttesterSlashing, EthSpec, ForkContext, ForkName, ProposerSlashing, - SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, - SignedBeaconBlockMerge, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, - SyncCommitteeMessage, SyncSubnetId, + Attestation, AttesterSlashing, EthSpec, ForkContext, ForkName, LightClientFinalityUpdate, + LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, + SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockMerge, + SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId, }; #[derive(Debug, Clone, PartialEq)] @@ -33,6 +33,10 @@ pub enum PubsubMessage { 
SignedContributionAndProof(Box>), /// Gossipsub message providing notification of unaggregated sync committee signatures with its subnet id. SyncCommitteeMessage(Box<(SyncSubnetId, SyncCommitteeMessage)>), + /// Gossipsub message providing notification of a light client finality update. + LightClientFinalityUpdate(Box>), + /// Gossipsub message providing notification of a light client optimistic update. + LightClientOptimisticUpdate(Box>), } // Implements the `DataTransform` trait of gossipsub to employ snappy compression @@ -115,6 +119,10 @@ impl PubsubMessage { PubsubMessage::AttesterSlashing(_) => GossipKind::AttesterSlashing, PubsubMessage::SignedContributionAndProof(_) => GossipKind::SignedContributionAndProof, PubsubMessage::SyncCommitteeMessage(data) => GossipKind::SyncCommitteeMessage(data.0), + PubsubMessage::LightClientFinalityUpdate(_) => GossipKind::LightClientFinalityUpdate, + PubsubMessage::LightClientOptimisticUpdate(_) => { + GossipKind::LightClientOptimisticUpdate + } } } @@ -206,6 +214,22 @@ impl PubsubMessage { sync_committee, )))) } + GossipKind::LightClientFinalityUpdate => { + let light_client_finality_update = + LightClientFinalityUpdate::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?; + Ok(PubsubMessage::LightClientFinalityUpdate(Box::new( + light_client_finality_update, + ))) + } + GossipKind::LightClientOptimisticUpdate => { + let light_client_optimistic_update = + LightClientOptimisticUpdate::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?; + Ok(PubsubMessage::LightClientOptimisticUpdate(Box::new( + light_client_optimistic_update, + ))) + } } } } @@ -227,6 +251,8 @@ impl PubsubMessage { PubsubMessage::Attestation(data) => data.1.as_ssz_bytes(), PubsubMessage::SignedContributionAndProof(data) => data.as_ssz_bytes(), PubsubMessage::SyncCommitteeMessage(data) => data.1.as_ssz_bytes(), + PubsubMessage::LightClientFinalityUpdate(data) => data.as_ssz_bytes(), + PubsubMessage::LightClientOptimisticUpdate(data) => 
data.as_ssz_bytes(), } } } @@ -261,6 +287,12 @@ impl std::fmt::Display for PubsubMessage { PubsubMessage::SyncCommitteeMessage(data) => { write!(f, "Sync committee message: subnet_id: {}", *data.0) } + PubsubMessage::LightClientFinalityUpdate(_data) => { + write!(f, "Light CLient Finality Update") + } + PubsubMessage::LightClientOptimisticUpdate(_data) => { + write!(f, "Light CLient Optimistic Update") + } } } } diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs index 47d703c2600..e7e3cf4abbe 100644 --- a/beacon_node/lighthouse_network/src/types/topics.rs +++ b/beacon_node/lighthouse_network/src/types/topics.rs @@ -18,6 +18,8 @@ pub const PROPOSER_SLASHING_TOPIC: &str = "proposer_slashing"; pub const ATTESTER_SLASHING_TOPIC: &str = "attester_slashing"; pub const SIGNED_CONTRIBUTION_AND_PROOF_TOPIC: &str = "sync_committee_contribution_and_proof"; pub const SYNC_COMMITTEE_PREFIX_TOPIC: &str = "sync_committee_"; +pub const LIGHT_CLIENT_FINALITY_UPDATE: &str = "light_client_finality_update"; +pub const LIGHT_CLIENT_OPTIMISTIC_UPDATE: &str = "light_client_optimistic_update"; pub const CORE_TOPICS: [GossipKind; 6] = [ GossipKind::BeaconBlock, @@ -28,6 +30,11 @@ pub const CORE_TOPICS: [GossipKind; 6] = [ GossipKind::SignedContributionAndProof, ]; +pub const LIGHT_CLIENT_GOSSIP_TOPICS: [GossipKind; 2] = [ + GossipKind::LightClientFinalityUpdate, + GossipKind::LightClientOptimisticUpdate, +]; + /// A gossipsub topic which encapsulates the type of messages that should be sent and received over /// the pubsub protocol and the way the messages should be encoded. #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)] @@ -63,6 +70,10 @@ pub enum GossipKind { /// Topic for publishing unaggregated sync committee signatures on a particular subnet. #[strum(serialize = "sync_committee")] SyncCommitteeMessage(SyncSubnetId), + /// Topic for publishing finality updates for light clients. 
+ LightClientFinalityUpdate, + /// Topic for publishing optimistic updates for light clients. + LightClientOptimisticUpdate, } impl std::fmt::Display for GossipKind { @@ -136,6 +147,8 @@ impl GossipTopic { VOLUNTARY_EXIT_TOPIC => GossipKind::VoluntaryExit, PROPOSER_SLASHING_TOPIC => GossipKind::ProposerSlashing, ATTESTER_SLASHING_TOPIC => GossipKind::AttesterSlashing, + LIGHT_CLIENT_FINALITY_UPDATE => GossipKind::LightClientFinalityUpdate, + LIGHT_CLIENT_OPTIMISTIC_UPDATE => GossipKind::LightClientOptimisticUpdate, topic => match committee_topic_index(topic) { Some(subnet) => match subnet { Subnet::Attestation(s) => GossipKind::Attestation(s), @@ -194,6 +207,8 @@ impl std::fmt::Display for GossipTopic { GossipKind::SyncCommitteeMessage(index) => { format!("{}{}", SYNC_COMMITTEE_PREFIX_TOPIC, *index) } + GossipKind::LightClientFinalityUpdate => LIGHT_CLIENT_FINALITY_UPDATE.into(), + GossipKind::LightClientOptimisticUpdate => LIGHT_CLIENT_OPTIMISTIC_UPDATE.into(), }; write!( f, diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index 9528cfd1dfb..743a97a29c2 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -62,9 +62,9 @@ use std::{cmp, collections::HashSet}; use task_executor::TaskExecutor; use tokio::sync::mpsc; use types::{ - Attestation, AttesterSlashing, Hash256, ProposerSlashing, SignedAggregateAndProof, - SignedBeaconBlock, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, - SyncCommitteeMessage, SyncSubnetId, + Attestation, AttesterSlashing, Hash256, LightClientFinalityUpdate, LightClientOptimisticUpdate, + ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, + SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId, }; use work_reprocessing_queue::{ spawn_reprocess_scheduler, QueuedAggregate, QueuedRpcBlock, QueuedUnaggregate, ReadyWork, @@ -129,6 +129,14 @@ const 
MAX_GOSSIP_PROPOSER_SLASHING_QUEUE_LEN: usize = 4_096; /// before we start dropping them. const MAX_GOSSIP_ATTESTER_SLASHING_QUEUE_LEN: usize = 4_096; +/// The maximum number of queued `LightClientFinalityUpdate` objects received on gossip that will be stored +/// before we start dropping them. +const MAX_GOSSIP_FINALITY_UPDATE_QUEUE_LEN: usize = 1_024; + +/// The maximum number of queued `LightClientOptimisticUpdate` objects received on gossip that will be stored +/// before we start dropping them. +const MAX_GOSSIP_OPTIMISTIC_UPDATE_QUEUE_LEN: usize = 1_024; + /// The maximum number of queued `SyncCommitteeMessage` objects that will be stored before we start dropping /// them. const MAX_SYNC_MESSAGE_QUEUE_LEN: usize = 2048; @@ -195,6 +203,8 @@ pub const GOSSIP_PROPOSER_SLASHING: &str = "gossip_proposer_slashing"; pub const GOSSIP_ATTESTER_SLASHING: &str = "gossip_attester_slashing"; pub const GOSSIP_SYNC_SIGNATURE: &str = "gossip_sync_signature"; pub const GOSSIP_SYNC_CONTRIBUTION: &str = "gossip_sync_contribution"; +pub const GOSSIP_LIGHT_CLIENT_FINALITY_UPDATE: &str = "light_client_finality_update"; +pub const GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE: &str = "light_client_optimistic_update"; pub const RPC_BLOCK: &str = "rpc_block"; pub const CHAIN_SEGMENT: &str = "chain_segment"; pub const STATUS_PROCESSING: &str = "status_processing"; @@ -476,6 +486,42 @@ impl WorkEvent { } } + /// Create a new `Work` event for some light client finality update. + pub fn gossip_light_client_finality_update( + message_id: MessageId, + peer_id: PeerId, + light_client_finality_update: Box>, + seen_timestamp: Duration, + ) -> Self { + Self { + drop_during_sync: true, + work: Work::GossipLightClientFinalityUpdate { + message_id, + peer_id, + light_client_finality_update, + seen_timestamp, + }, + } + } + + /// Create a new `Work` event for some light client optimistic update. 
+ pub fn gossip_light_client_optimistic_update( + message_id: MessageId, + peer_id: PeerId, + light_client_optimistic_update: Box>, + seen_timestamp: Duration, + ) -> Self { + Self { + drop_during_sync: true, + work: Work::GossipLightClientOptimisticUpdate { + message_id, + peer_id, + light_client_optimistic_update, + seen_timestamp, + }, + } + } + /// Create a new `Work` event for some attester slashing. pub fn gossip_attester_slashing( message_id: MessageId, @@ -730,6 +776,18 @@ pub enum Work { sync_contribution: Box>, seen_timestamp: Duration, }, + GossipLightClientFinalityUpdate { + message_id: MessageId, + peer_id: PeerId, + light_client_finality_update: Box>, + seen_timestamp: Duration, + }, + GossipLightClientOptimisticUpdate { + message_id: MessageId, + peer_id: PeerId, + light_client_optimistic_update: Box>, + seen_timestamp: Duration, + }, RpcBlock { block_root: Hash256, block: Arc>, @@ -777,6 +835,8 @@ impl Work { Work::GossipAttesterSlashing { .. } => GOSSIP_ATTESTER_SLASHING, Work::GossipSyncSignature { .. } => GOSSIP_SYNC_SIGNATURE, Work::GossipSyncContribution { .. } => GOSSIP_SYNC_CONTRIBUTION, + Work::GossipLightClientFinalityUpdate { .. } => GOSSIP_LIGHT_CLIENT_FINALITY_UPDATE, + Work::GossipLightClientOptimisticUpdate { .. } => GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE, Work::RpcBlock { .. } => RPC_BLOCK, Work::ChainSegment { .. } => CHAIN_SEGMENT, Work::Status { .. } => STATUS_PROCESSING, @@ -916,6 +976,10 @@ impl BeaconProcessor { let mut gossip_attester_slashing_queue = FifoQueue::new(MAX_GOSSIP_ATTESTER_SLASHING_QUEUE_LEN); + // Using a FIFO queue for light client updates to maintain sequence order. + let mut finality_update_queue = FifoQueue::new(MAX_GOSSIP_FINALITY_UPDATE_QUEUE_LEN); + let mut optimistic_update_queue = FifoQueue::new(MAX_GOSSIP_OPTIMISTIC_UPDATE_QUEUE_LEN); + // Using a FIFO queue since blocks need to be imported sequentially. 
let mut rpc_block_queue = FifoQueue::new(MAX_RPC_BLOCK_QUEUE_LEN); let mut chain_segment_queue = FifoQueue::new(MAX_CHAIN_SEGMENT_QUEUE_LEN); @@ -1250,6 +1314,12 @@ impl BeaconProcessor { Work::GossipSyncContribution { .. } => { sync_contribution_queue.push(work) } + Work::GossipLightClientFinalityUpdate { .. } => { + finality_update_queue.push(work, work_id, &self.log) + } + Work::GossipLightClientOptimisticUpdate { .. } => { + optimistic_update_queue.push(work, work_id, &self.log) + } Work::RpcBlock { .. } => rpc_block_queue.push(work, work_id, &self.log), Work::ChainSegment { ref process_id, .. } => match process_id { ChainSegmentProcessId::RangeBatchId { .. } @@ -1551,7 +1621,7 @@ impl BeaconProcessor { ) }), /* - * Syn contribution verification. + * Sync contribution verification. */ Work::GossipSyncContribution { message_id, @@ -1566,6 +1636,38 @@ impl BeaconProcessor { seen_timestamp, ) }), + /* + * Light client finality update verification. + */ + Work::GossipLightClientFinalityUpdate { + message_id, + peer_id, + light_client_finality_update, + seen_timestamp, + } => task_spawner.spawn_blocking(move || { + worker.process_gossip_finality_update( + message_id, + peer_id, + *light_client_finality_update, + seen_timestamp, + ) + }), + /* + * Light client optimistic update verification. + */ + Work::GossipLightClientOptimisticUpdate { + message_id, + peer_id, + light_client_optimistic_update, + seen_timestamp, + } => task_spawner.spawn_blocking(move || { + worker.process_gossip_optimistic_update( + message_id, + peer_id, + *light_client_optimistic_update, + seen_timestamp, + ) + }), /* * Verification for beacon blocks received during syncing via RPC. 
*/ diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index 947d9cfe274..ef23f6761f6 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -3,6 +3,8 @@ use crate::{metrics, service::NetworkMessage, sync::SyncMessage}; use beacon_chain::store::Error; use beacon_chain::{ attestation_verification::{self, Error as AttnError, VerifiedAttestation}, + light_client_finality_update_verification::Error as LightClientFinalityUpdateError, + light_client_optimistic_update_verification::Error as LightClientOptimisticUpdateError, observed_operations::ObservationOutcome, sync_committee_verification::{self, Error as SyncCommitteeError}, validator_monitor::get_block_delay_ms, @@ -18,9 +20,10 @@ use std::time::{Duration, SystemTime, UNIX_EPOCH}; use store::hot_cold_store::HotColdDBError; use tokio::sync::mpsc; use types::{ - Attestation, AttesterSlashing, EthSpec, Hash256, IndexedAttestation, ProposerSlashing, - SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, SignedVoluntaryExit, - Slot, SubnetId, SyncCommitteeMessage, SyncSubnetId, + Attestation, AttesterSlashing, EthSpec, Hash256, IndexedAttestation, LightClientFinalityUpdate, + LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, + SignedContributionAndProof, SignedVoluntaryExit, Slot, SubnetId, SyncCommitteeMessage, + SyncSubnetId, }; use super::{ @@ -1303,6 +1306,138 @@ impl Worker { metrics::inc_counter(&metrics::BEACON_PROCESSOR_SYNC_CONTRIBUTION_IMPORTED_TOTAL); } + pub fn process_gossip_finality_update( + self, + message_id: MessageId, + peer_id: PeerId, + light_client_finality_update: LightClientFinalityUpdate, + seen_timestamp: Duration, + ) { + match self + .chain + .verify_finality_update_for_gossip(light_client_finality_update, seen_timestamp) + { + 
Ok(_verified_light_client_finality_update) => { + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept); + } + Err(e) => { + metrics::register_finality_update_error(&e); + match e { + LightClientFinalityUpdateError::InvalidLightClientFinalityUpdate => { + debug!( + self.log, + "LC invalid finality update"; + "peer" => %peer_id, + "error" => ?e, + ); + + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "light_client_gossip_error", + ); + } + LightClientFinalityUpdateError::TooEarly => { + debug!( + self.log, + "LC finality update too early"; + "peer" => %peer_id, + "error" => ?e, + ); + + self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "light_client_gossip_error", + ); + } + LightClientFinalityUpdateError::FinalityUpdateAlreadySeen => debug!( + self.log, + "LC finality update already seen"; + "peer" => %peer_id, + "error" => ?e, + ), + LightClientFinalityUpdateError::BeaconChainError(_) + | LightClientFinalityUpdateError::LightClientUpdateError(_) + | LightClientFinalityUpdateError::SigSlotStartIsNone + | LightClientFinalityUpdateError::FailedConstructingUpdate => debug!( + self.log, + "LC error constructing finality update"; + "peer" => %peer_id, + "error" => ?e, + ), + } + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + } + }; + } + + pub fn process_gossip_optimistic_update( + self, + message_id: MessageId, + peer_id: PeerId, + light_client_optimistic_update: LightClientOptimisticUpdate, + seen_timestamp: Duration, + ) { + match self + .chain + .verify_optimistic_update_for_gossip(light_client_optimistic_update, seen_timestamp) + { + Ok(_verified_light_client_optimistic_update) => { + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept); + } + Err(e) => { + metrics::register_optimistic_update_error(&e); + match e { + LightClientOptimisticUpdateError::InvalidLightClientOptimisticUpdate => { + debug!( + self.log, + "LC invalid 
optimistic update"; + "peer" => %peer_id, + "error" => ?e, + ); + + self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "light_client_gossip_error", + ) + } + LightClientOptimisticUpdateError::TooEarly => { + debug!( + self.log, + "LC optimistic update too early"; + "peer" => %peer_id, + "error" => ?e, + ); + + self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "light_client_gossip_error", + ); + } + LightClientOptimisticUpdateError::OptimisticUpdateAlreadySeen => debug!( + self.log, + "LC optimistic update already seen"; + "peer" => %peer_id, + "error" => ?e, + ), + LightClientOptimisticUpdateError::BeaconChainError(_) + | LightClientOptimisticUpdateError::LightClientUpdateError(_) + | LightClientOptimisticUpdateError::SigSlotStartIsNone + | LightClientOptimisticUpdateError::FailedConstructingUpdate => debug!( + self.log, + "LC error constructing optimistic update"; + "peer" => %peer_id, + "error" => ?e, + ), + } + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + } + }; + } + /// Handle an error whilst verifying an `Attestation` or `SignedAggregateAndProof` from the /// network. fn handle_attestation_verification_failure( diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index b4e7a3bace3..b4f3f29f934 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -1,5 +1,7 @@ use beacon_chain::{ attestation_verification::Error as AttnError, + light_client_finality_update_verification::Error as LightClientFinalityUpdateError, + light_client_optimistic_update_verification::Error as LightClientOptimisticUpdateError, sync_committee_verification::Error as SyncCommitteeError, }; use fnv::FnvHashMap; @@ -252,6 +254,19 @@ lazy_static! 
{ "Gossipsub sync_committee errors per error type", &["type"] ); + pub static ref GOSSIP_FINALITY_UPDATE_ERRORS_PER_TYPE: Result = + try_create_int_counter_vec( + "gossipsub_light_client_finality_update_errors_per_type", + "Gossipsub light_client_finality_update errors per error type", + &["type"] + ); + pub static ref GOSSIP_OPTIMISTIC_UPDATE_ERRORS_PER_TYPE: Result = + try_create_int_counter_vec( + "gossipsub_light_client_optimistic_update_errors_per_type", + "Gossipsub light_client_optimistic_update errors per error type", + &["type"] + ); + /* * Network queue metrics @@ -358,6 +373,14 @@ pub fn update_bandwidth_metrics(bandwidth: Arc) { ); } +pub fn register_finality_update_error(error: &LightClientFinalityUpdateError) { + inc_counter_vec(&GOSSIP_FINALITY_UPDATE_ERRORS_PER_TYPE, &[error.as_ref()]); +} + +pub fn register_optimistic_update_error(error: &LightClientOptimisticUpdateError) { + inc_counter_vec(&GOSSIP_OPTIMISTIC_UPDATE_ERRORS_PER_TYPE, &[error.as_ref()]); +} + pub fn register_attestation_error(error: &AttnError) { inc_counter_vec(&GOSSIP_ATTESTATION_ERRORS_PER_TYPE, &[error.as_ref()]); } diff --git a/beacon_node/network/src/router/mod.rs b/beacon_node/network/src/router/mod.rs index 5df308f2592..ce98337cfed 100644 --- a/beacon_node/network/src/router/mod.rs +++ b/beacon_node/network/src/router/mod.rs @@ -280,6 +280,30 @@ impl Router { sync_committtee_msg.0, ); } + PubsubMessage::LightClientFinalityUpdate(light_client_finality_update) => { + trace!( + self.log, + "Received light client finality update"; + "peer_id" => %peer_id + ); + self.processor.on_light_client_finality_update_gossip( + id, + peer_id, + light_client_finality_update, + ); + } + PubsubMessage::LightClientOptimisticUpdate(light_client_optimistic_update) => { + trace!( + self.log, + "Received light client optimistic update"; + "peer_id" => %peer_id + ); + self.processor.on_light_client_optimistic_update_gossip( + id, + peer_id, + light_client_optimistic_update, + ); + } } } } diff 
--git a/beacon_node/network/src/router/processor.rs b/beacon_node/network/src/router/processor.rs index 3c9a4a81fb9..999ba29e90a 100644 --- a/beacon_node/network/src/router/processor.rs +++ b/beacon_node/network/src/router/processor.rs @@ -17,8 +17,9 @@ use std::time::{Duration, SystemTime, UNIX_EPOCH}; use store::SyncCommitteeMessage; use tokio::sync::mpsc; use types::{ - Attestation, AttesterSlashing, EthSpec, ProposerSlashing, SignedAggregateAndProof, - SignedBeaconBlock, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncSubnetId, + Attestation, AttesterSlashing, EthSpec, LightClientFinalityUpdate, LightClientOptimisticUpdate, + ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, + SignedVoluntaryExit, SubnetId, SyncSubnetId, }; /// Processes validated messages from the network. It relays necessary data to the syncing thread @@ -368,6 +369,34 @@ impl Processor { )) } + pub fn on_light_client_finality_update_gossip( + &mut self, + message_id: MessageId, + peer_id: PeerId, + light_client_finality_update: Box>, + ) { + self.send_beacon_processor_work(BeaconWorkEvent::gossip_light_client_finality_update( + message_id, + peer_id, + light_client_finality_update, + timestamp_now(), + )) + } + + pub fn on_light_client_optimistic_update_gossip( + &mut self, + message_id: MessageId, + peer_id: PeerId, + light_client_optimistic_update: Box>, + ) { + self.send_beacon_processor_work(BeaconWorkEvent::gossip_light_client_optimistic_update( + message_id, + peer_id, + light_client_optimistic_update, + timestamp_now(), + )) + } + fn send_beacon_processor_work(&mut self, work: BeaconWorkEvent) { self.beacon_processor_send .try_send(work) diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 31c42b860de..4568ed1a229 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -208,6 +208,8 @@ pub struct NetworkService { metrics_update: tokio::time::Interval, /// 
gossipsub_parameter_update timer gossipsub_parameter_update: tokio::time::Interval, + /// enable_light_client_server indicator + enable_light_client_server: bool, /// The logger for the network service. fork_context: Arc, log: slog::Logger, @@ -345,6 +347,7 @@ impl NetworkService { gossipsub_parameter_update, fork_context, log: network_log, + enable_light_client_server: config.enable_light_client_server, }; network_service.spawn_service(executor); @@ -679,6 +682,7 @@ impl NetworkService { } return; } + let mut subscribed_topics: Vec = vec![]; for topic_kind in lighthouse_network::types::CORE_TOPICS.iter() { for fork_digest in self.required_gossip_fork_digests() { @@ -695,6 +699,25 @@ impl NetworkService { } } + if self.enable_light_client_server { + for light_client_topic_kind in + lighthouse_network::types::LIGHT_CLIENT_GOSSIP_TOPICS.iter() + { + for fork_digest in self.required_gossip_fork_digests() { + let light_client_topic = GossipTopic::new( + light_client_topic_kind.clone(), + GossipEncoding::default(), + fork_digest, + ); + if self.libp2p.subscribe(light_client_topic.clone()) { + subscribed_topics.push(light_client_topic); + } else { + warn!(self.log, "Could not subscribe to topic"; "topic" => %light_client_topic); + } + } + } + } + // If we are to subscribe to all subnets we do it here if self.subscribe_all_subnets { for subnet_id in 0..<::EthSpec as EthSpec>::SubnetBitfieldLength::to_u64() { diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 4a6cc57b119..37bab8b4806 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -51,6 +51,7 @@ pub mod graffiti; pub mod historical_batch; pub mod indexed_attestation; pub mod light_client_bootstrap; +pub mod light_client_finality_update; pub mod light_client_optimistic_update; pub mod light_client_update; pub mod pending_attestation; @@ -136,6 +137,8 @@ pub use crate::free_attestation::FreeAttestation; pub use crate::graffiti::{Graffiti, GRAFFITI_BYTES_LEN}; pub use 
crate::historical_batch::HistoricalBatch; pub use crate::indexed_attestation::IndexedAttestation; +pub use crate::light_client_finality_update::LightClientFinalityUpdate; +pub use crate::light_client_optimistic_update::LightClientOptimisticUpdate; pub use crate::participation_flags::ParticipationFlags; pub use crate::participation_list::ParticipationList; pub use crate::payload::{BlindedPayload, BlockType, ExecPayload, FullPayload}; diff --git a/consensus/types/src/light_client_finality_update.rs b/consensus/types/src/light_client_finality_update.rs index fe26c0fa3eb..cae6266f9e7 100644 --- a/consensus/types/src/light_client_finality_update.rs +++ b/consensus/types/src/light_client_finality_update.rs @@ -1,10 +1,10 @@ -use super::{BeaconBlockHeader, EthSpec, FixedVector, Hash256, Slot, SyncAggregate, SyncCommittee}; -use crate::{light_client_update::*, test_utils::TestRandom, BeaconBlock, BeaconState, ChainSpec}; -use safe_arith::ArithError; +use super::{ + BeaconBlockHeader, EthSpec, FixedVector, Hash256, SignedBeaconBlock, SignedBlindedBeaconBlock, + Slot, SyncAggregate, +}; +use crate::{light_client_update::*, test_utils::TestRandom, BeaconState, ChainSpec}; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; -use ssz_types::typenum::{U5, U6}; -use std::sync::Arc; use test_random_derive::TestRandom; use tree_hash::TreeHash; @@ -28,43 +28,38 @@ pub struct LightClientFinalityUpdate { impl LightClientFinalityUpdate { pub fn new( - chain_spec: ChainSpec, - beacon_state: BeaconState, - block: BeaconBlock, + chain_spec: &ChainSpec, + beacon_state: &BeaconState, + block: &SignedBeaconBlock, attested_state: &mut BeaconState, - finalized_block: BeaconBlock, + finalized_block: &SignedBlindedBeaconBlock, ) -> Result { let altair_fork_epoch = chain_spec .altair_fork_epoch .ok_or(Error::AltairForkNotActive)?; - if attested_state.slot().epoch(T::slots_per_epoch()) < altair_fork_epoch { + if beacon_state.slot().epoch(T::slots_per_epoch()) < 
altair_fork_epoch { return Err(Error::AltairForkNotActive); } - let sync_aggregate = block.body().sync_aggregate()?; + let sync_aggregate = block.message().body().sync_aggregate()?; if sync_aggregate.num_set_bits() < chain_spec.min_sync_committee_participants as usize { return Err(Error::NotEnoughSyncCommitteeParticipants); } // Compute and validate attested header. let mut attested_header = attested_state.latest_block_header().clone(); - attested_header.state_root = attested_state.tree_hash_root(); + attested_header.state_root = attested_state.update_tree_hash_cache()?; // Build finalized header from finalized block - let finalized_header = BeaconBlockHeader { - slot: finalized_block.slot(), - proposer_index: finalized_block.proposer_index(), - parent_root: finalized_block.parent_root(), - state_root: finalized_block.state_root(), - body_root: finalized_block.body_root(), - }; + let finalized_header = finalized_block.message().block_header(); + if finalized_header.tree_hash_root() != beacon_state.finalized_checkpoint().root { return Err(Error::InvalidFinalizedBlock); } let finality_branch = attested_state.compute_merkle_proof(FINALIZED_ROOT_INDEX)?; Ok(Self { - attested_header: attested_header, - finalized_header: finalized_header, + attested_header, + finalized_header, finality_branch: FixedVector::new(finality_branch)?, sync_aggregate: sync_aggregate.clone(), signature_slot: block.slot(), diff --git a/consensus/types/src/light_client_optimistic_update.rs b/consensus/types/src/light_client_optimistic_update.rs index 9592bf1c23b..8dda8cd5aed 100644 --- a/consensus/types/src/light_client_optimistic_update.rs +++ b/consensus/types/src/light_client_optimistic_update.rs @@ -1,6 +1,6 @@ use super::{BeaconBlockHeader, EthSpec, Slot, SyncAggregate}; use crate::{ - light_client_update::Error, test_utils::TestRandom, BeaconBlock, BeaconState, ChainSpec, + light_client_update::Error, test_utils::TestRandom, BeaconState, ChainSpec, SignedBeaconBlock, }; use 
serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -23,9 +23,9 @@ pub struct LightClientOptimisticUpdate { impl LightClientOptimisticUpdate { pub fn new( - chain_spec: ChainSpec, - block: BeaconBlock, - attested_state: BeaconState, + chain_spec: &ChainSpec, + block: &SignedBeaconBlock, + attested_state: &BeaconState, ) -> Result { let altair_fork_epoch = chain_spec .altair_fork_epoch @@ -34,7 +34,7 @@ impl LightClientOptimisticUpdate { return Err(Error::AltairForkNotActive); } - let sync_aggregate = block.body().sync_aggregate()?; + let sync_aggregate = block.message().body().sync_aggregate()?; if sync_aggregate.num_set_bits() < chain_spec.min_sync_committee_participants as usize { return Err(Error::NotEnoughSyncCommitteeParticipants); } From 6f79263a2191030b98f2030800486f4e833f3900 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 13 Dec 2022 06:24:52 +0000 Subject: [PATCH 06/23] Make all validator monitor logs `INFO` (#3727) ## Issue Addressed NA ## Proposed Changes This is a *potentially* contentious change, but I find it annoying that the validator monitor logs `WARN` and `ERRO` for imperfect attestations. Perfect attestation performance is unachievable (don't believe those photo-shopped beauty magazines!) since missed and poorly-packed blocks by other validators will reduce your performance. When the validator monitor is on with 10s or more validators, I find the logs are washed out with ERROs that are not worth investigating. I suspect that users who really want to know if validators are missing attestations can do so by matching the content of the log, rather than the log level. I'm open to feedback about this, especially from anyone who is relying on the current log levels. ## Additional Info NA ## Breaking Changes Notes The validator monitor will no longer emit `WARN` and `ERRO` logs for sub-optimal attestation performance. The logs will now be emitted at `INFO` level. 
This change was introduced to avoid cluttering the `WARN` and `ERRO` logs with alerts that are frequently triggered by the actions of other network participants (e.g., a missed block) and require no action from the user. --- beacon_node/beacon_chain/src/validator_monitor.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/beacon_node/beacon_chain/src/validator_monitor.rs b/beacon_node/beacon_chain/src/validator_monitor.rs index c99f85639cb..2d093ff886e 100644 --- a/beacon_node/beacon_chain/src/validator_monitor.rs +++ b/beacon_node/beacon_chain/src/validator_monitor.rs @@ -4,7 +4,7 @@ use crate::metrics; use parking_lot::RwLock; -use slog::{crit, debug, error, info, warn, Logger}; +use slog::{crit, debug, info, Logger}; use slot_clock::SlotClock; use state_processing::per_epoch_processing::{ errors::EpochProcessingError, EpochProcessingSummary, @@ -580,7 +580,7 @@ impl ValidatorMonitor { ); } if !attestation_miss.is_empty() { - error!( + info!( self.log, "Previous epoch attestation(s) missing"; "epoch" => prev_epoch, @@ -589,7 +589,7 @@ impl ValidatorMonitor { } if !head_miss.is_empty() { - warn!( + info!( self.log, "Previous epoch attestation(s) failed to match head"; "epoch" => prev_epoch, @@ -598,7 +598,7 @@ impl ValidatorMonitor { } if !target_miss.is_empty() { - warn!( + info!( self.log, "Previous epoch attestation(s) failed to match target"; "epoch" => prev_epoch, @@ -607,7 +607,7 @@ impl ValidatorMonitor { } if !suboptimal_inclusion.is_empty() { - warn!( + info!( self.log, "Previous epoch attestation(s) had sub-optimal inclusion delay"; "epoch" => prev_epoch, From 775d22229903c560a0468d844b1b4daadc3b2e1e Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 13 Dec 2022 09:57:26 +0000 Subject: [PATCH 07/23] Enable proposer boost re-orging (#2860) ## Proposed Changes With proposer boosting implemented (#2822) we have an opportunity to re-org out late blocks. 
This PR adds three flags to the BN to control this behaviour: * `--disable-proposer-reorgs`: turn aggressive re-orging off (it's on by default). * `--proposer-reorg-threshold N`: attempt to orphan blocks with less than N% of the committee vote. If this parameter isn't set then N defaults to 20% when the feature is enabled. * `--proposer-reorg-epochs-since-finalization N`: only attempt to re-org late blocks when the number of epochs since finalization is less than or equal to N. The default is 2 epochs, meaning re-orgs will only be attempted when the chain is finalizing optimally. For safety Lighthouse will only attempt a re-org under very specific conditions: 1. The block being proposed is 1 slot after the canonical head, and the canonical head is 1 slot after its parent. i.e. at slot `n + 1` rather than building on the block from slot `n` we build on the block from slot `n - 1`. 2. The current canonical head received less than N% of the committee vote. N should be set depending on the proposer boost fraction itself, the fraction of the network that is believed to be applying it, and the size of the largest entity that could be hoarding votes. 3. The current canonical head arrived after the attestation deadline from our perspective. This condition was only added to support suppression of forkchoiceUpdated messages, but makes intuitive sense. 4. The block is being proposed in the first 2 seconds of the slot. This gives it time to propagate and receive the proposer boost. 
## Additional Info For the initial idea and background, see: https://github.com/ethereum/consensus-specs/pull/2353#issuecomment-950238004 There is also a specification for this feature here: https://github.com/ethereum/consensus-specs/pull/3034 Co-authored-by: Michael Sproul Co-authored-by: pawan --- .github/custom/clippy.toml | 1 + Cargo.lock | 1 + beacon_node/beacon_chain/src/beacon_chain.rs | 643 ++++++++++++++---- .../src/beacon_fork_choice_store.rs | 53 +- beacon_node/beacon_chain/src/builder.rs | 26 +- .../beacon_chain/src/canonical_head.rs | 31 +- beacon_node/beacon_chain/src/chain_config.rs | 25 +- beacon_node/beacon_chain/src/errors.rs | 4 +- beacon_node/beacon_chain/src/fork_revert.rs | 3 +- beacon_node/beacon_chain/src/lib.rs | 4 +- beacon_node/beacon_chain/src/metrics.rs | 34 +- .../beacon_chain/src/proposer_prep_service.rs | 13 +- .../beacon_chain/src/snapshot_cache.rs | 7 +- .../beacon_chain/src/state_advance_timer.rs | 15 +- beacon_node/beacon_chain/src/test_utils.rs | 127 +++- beacon_node/beacon_chain/tests/merge.rs | 2 + .../tests/payload_invalidation.rs | 21 +- beacon_node/client/src/builder.rs | 6 +- .../src/engine_api/json_structures.rs | 10 +- beacon_node/execution_layer/src/lib.rs | 28 +- .../test_utils/execution_block_generator.rs | 162 ++++- .../src/test_utils/handle_rpc.rs | 8 + .../execution_layer/src/test_utils/hook.rs | 27 + .../src/test_utils/mock_execution_layer.rs | 15 + .../execution_layer/src/test_utils/mod.rs | 7 +- beacon_node/http_api/tests/common.rs | 27 +- .../http_api/tests/interactive_tests.rs | 506 +++++++++++++- beacon_node/src/cli.rs | 32 + beacon_node/src/config.rs | 26 + book/src/SUMMARY.md | 1 + book/src/builders.md | 12 +- book/src/late-block-re-orgs.md | 60 ++ common/task_executor/src/test_utils.rs | 7 + consensus/fork_choice/src/fork_choice.rs | 83 ++- .../fork_choice/src/fork_choice_store.rs | 3 +- consensus/fork_choice/tests/tests.rs | 8 +- consensus/proto_array/Cargo.toml | 1 + consensus/proto_array/src/error.rs | 
9 + .../src/fork_choice_test_definition.rs | 17 +- .../execution_status.rs | 2 +- .../proto_array/src/justified_balances.rs | 62 ++ consensus/proto_array/src/lib.rs | 11 +- consensus/proto_array/src/proto_array.rs | 43 +- .../src/proto_array_fork_choice.rs | 278 +++++++- consensus/proto_array/src/ssz_container.rs | 18 +- lcli/src/main.rs | 8 + lcli/src/new_testnet.rs | 4 + lighthouse/tests/beacon_node.rs | 72 ++ lighthouse/tests/validator_client.rs | 18 + scripts/local_testnet/setup.sh | 1 + scripts/local_testnet/vars.env | 3 + validator_client/src/block_service.rs | 21 + validator_client/src/cli.rs | 14 +- validator_client/src/config.rs | 13 + validator_client/src/lib.rs | 1 + 55 files changed, 2301 insertions(+), 333 deletions(-) create mode 100644 beacon_node/execution_layer/src/test_utils/hook.rs create mode 100644 book/src/late-block-re-orgs.md create mode 100644 consensus/proto_array/src/justified_balances.rs diff --git a/.github/custom/clippy.toml b/.github/custom/clippy.toml index f50e35bcdfd..3ccbeee44a7 100644 --- a/.github/custom/clippy.toml +++ b/.github/custom/clippy.toml @@ -16,6 +16,7 @@ async-wrapper-methods = [ "task_executor::TaskExecutor::spawn_blocking_handle", "warp_utils::task::blocking_task", "warp_utils::task::blocking_json_task", + "beacon_chain::beacon_chain::BeaconChain::spawn_blocking_handle", "validator_client::http_api::blocking_signed_json_task", "execution_layer::test_utils::MockServer::new", "execution_layer::test_utils::MockServer::new_with_config", diff --git a/Cargo.lock b/Cargo.lock index 12d44d3e161..2f7cd873dd7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4965,6 +4965,7 @@ version = "0.2.0" dependencies = [ "eth2_ssz", "eth2_ssz_derive", + "safe_arith", "serde", "serde_derive", "serde_yaml", diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 309f6a83e07..55d6ae29efb 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ 
b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -44,9 +44,8 @@ use crate::observed_operations::{ObservationOutcome, ObservedOperations}; use crate::persisted_beacon_chain::{PersistedBeaconChain, DUMMY_CANONICAL_HEAD_BLOCK_ROOT}; use crate::persisted_fork_choice::PersistedForkChoice; use crate::pre_finalization_cache::PreFinalizationBlockCache; -use crate::proposer_prep_service::PAYLOAD_PREPARATION_LOOKAHEAD_FACTOR; use crate::shuffling_cache::{BlockShufflingIds, ShufflingCache}; -use crate::snapshot_cache::SnapshotCache; +use crate::snapshot_cache::{BlockProductionPreState, SnapshotCache}; use crate::sync_committee_verification::{ Error as SyncCommitteeError, VerifiedSyncCommitteeMessage, VerifiedSyncContribution, }; @@ -56,9 +55,7 @@ use crate::validator_monitor::{ HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS, }; use crate::validator_pubkey_cache::ValidatorPubkeyCache; -use crate::BeaconForkChoiceStore; -use crate::BeaconSnapshot; -use crate::{metrics, BeaconChainError}; +use crate::{metrics, BeaconChainError, BeaconForkChoiceStore, BeaconSnapshot, CachedHead}; use eth2::types::{EventKind, SseBlock, SyncDuty}; use execution_layer::{ BuilderParams, ChainHealth, ExecutionLayer, FailedCondition, PayloadAttributes, PayloadStatus, @@ -72,7 +69,7 @@ use itertools::process_results; use itertools::Itertools; use operation_pool::{AttestationRef, OperationPool, PersistedOperationPool}; use parking_lot::{Mutex, RwLock}; -use proto_array::CountUnrealizedFull; +use proto_array::{CountUnrealizedFull, DoNotReOrg, ProposerHeadError}; use safe_arith::SafeArith; use slasher::Slasher; use slog::{crit, debug, error, info, trace, warn, Logger}; @@ -103,6 +100,7 @@ use store::{ use task_executor::{ShutdownReason, TaskExecutor}; use tree_hash::TreeHash; use types::beacon_state::CloneConfig; +use types::consts::merge::INTERVALS_PER_SLOT; use types::*; pub use crate::canonical_head::{CanonicalHead, CanonicalHeadRwLock}; @@ -127,6 +125,12 @@ pub const 
VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT: Duration = Duration::from_secs(1) /// The timeout for the eth1 finalization cache pub const ETH1_FINALIZATION_CACHE_LOCK_TIMEOUT: Duration = Duration::from_millis(200); +/// The latest delay from the start of the slot at which to attempt a 1-slot re-org. +fn max_re_org_slot_delay(seconds_per_slot: u64) -> Duration { + // Allow at least half of the attestation deadline for the block to propagate. + Duration::from_secs(seconds_per_slot) / INTERVALS_PER_SLOT as u32 / 2 +} + // These keys are all zero because they get stored in different columns, see `DBColumn` type. pub const BEACON_CHAIN_DB_KEY: Hash256 = Hash256::zero(); pub const OP_POOL_DB_KEY: Hash256 = Hash256::zero(); @@ -188,6 +192,21 @@ pub enum ProduceBlockVerification { NoVerification, } +/// Payload attributes for which the `beacon_chain` crate is responsible. +pub struct PrePayloadAttributes { + pub proposer_index: u64, + pub prev_randao: Hash256, +} + +/// Define whether a forkchoiceUpdate needs to be checked for an override (`Yes`) or has already +/// been checked (`AlreadyApplied`). It is safe to specify `Yes` even if re-orgs are disabled. +#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] +pub enum OverrideForkchoiceUpdate { + #[default] + Yes, + AlreadyApplied, +} + /// The accepted clock drift for nodes gossiping blocks and attestations. See: /// /// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/p2p-interface.md#configuration @@ -2756,6 +2775,7 @@ impl BeaconChain { if !payload_verification_status.is_optimistic() && block.slot() + EARLY_ATTESTER_CACHE_HISTORIC_SLOTS >= current_slot { + let fork_choice_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_FORK_CHOICE); match fork_choice.get_head(current_slot, &self.spec) { // This block became the head, add it to the early attester cache. 
Ok(new_head_root) if new_head_root == block_root => { @@ -2789,6 +2809,7 @@ impl BeaconChain { "error" => ?e ), } + drop(fork_choice_timer); } drop(post_exec_timer); @@ -3475,6 +3496,7 @@ impl BeaconChain { // signed. If we miss the cache or we're producing a block that conflicts with the head, // fall back to getting the head from `slot - 1`. let state_load_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_STATE_LOAD_TIMES); + // Atomically read some values from the head whilst avoiding holding cached head `Arc` any // longer than necessary. let (head_slot, head_block_root) = { @@ -3482,8 +3504,19 @@ impl BeaconChain { (head.head_slot(), head.head_block_root()) }; let (state, state_root_opt) = if head_slot < slot { + // Attempt an aggressive re-org if configured and the conditions are right. + if let Some(re_org_state) = self.get_state_for_re_org(slot, head_slot, head_block_root) + { + info!( + self.log, + "Proposing block to re-org current head"; + "slot" => slot, + "head_to_reorg" => %head_block_root, + ); + (re_org_state.pre_state, re_org_state.state_root) + } // Normal case: proposing a block atop the current head. Use the snapshot cache. - if let Some(pre_state) = self + else if let Some(pre_state) = self .snapshot_cache .try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) .and_then(|snapshot_cache| { @@ -3523,6 +3556,400 @@ impl BeaconChain { Ok((state, state_root_opt)) } + /// Fetch the beacon state to use for producing a block if a 1-slot proposer re-org is viable. + /// + /// This function will return `None` if proposer re-orgs are disabled. 
+ fn get_state_for_re_org( + &self, + slot: Slot, + head_slot: Slot, + canonical_head: Hash256, + ) -> Option> { + let re_org_threshold = self.config.re_org_threshold?; + + if self.spec.proposer_score_boost.is_none() { + warn!( + self.log, + "Ignoring proposer re-org configuration"; + "reason" => "this network does not have proposer boost enabled" + ); + return None; + } + + let slot_delay = self + .slot_clock + .seconds_from_current_slot_start(self.spec.seconds_per_slot) + .or_else(|| { + warn!( + self.log, + "Not attempting re-org"; + "error" => "unable to read slot clock" + ); + None + })?; + + // Attempt a proposer re-org if: + // + // 1. It seems we have time to propagate and still receive the proposer boost. + // 2. The current head block was seen late. + // 3. The `get_proposer_head` conditions from fork choice pass. + let proposing_on_time = slot_delay < max_re_org_slot_delay(self.spec.seconds_per_slot); + if !proposing_on_time { + debug!( + self.log, + "Not attempting re-org"; + "reason" => "not proposing on time", + ); + return None; + } + + let head_late = self.block_observed_after_attestation_deadline(canonical_head, head_slot); + if !head_late { + debug!( + self.log, + "Not attempting re-org"; + "reason" => "head not late" + ); + return None; + } + + // Is the current head weak and appropriate for re-orging? 
+ let proposer_head_timer = + metrics::start_timer(&metrics::BLOCK_PRODUCTION_GET_PROPOSER_HEAD_TIMES); + let proposer_head = self + .canonical_head + .fork_choice_read_lock() + .get_proposer_head( + slot, + canonical_head, + re_org_threshold, + self.config.re_org_max_epochs_since_finalization, + ) + .map_err(|e| match e { + ProposerHeadError::DoNotReOrg(reason) => { + debug!( + self.log, + "Not attempting re-org"; + "reason" => %reason, + ); + } + ProposerHeadError::Error(e) => { + warn!( + self.log, + "Not attempting re-org"; + "error" => ?e, + ); + } + }) + .ok()?; + drop(proposer_head_timer); + let re_org_parent_block = proposer_head.parent_node.root; + + // Only attempt a re-org if we hit the snapshot cache. + let pre_state = self + .snapshot_cache + .try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) + .and_then(|snapshot_cache| { + snapshot_cache.get_state_for_block_production(re_org_parent_block) + }) + .or_else(|| { + debug!( + self.log, + "Not attempting re-org"; + "reason" => "missed snapshot cache", + "parent_block" => ?re_org_parent_block, + ); + None + })?; + + info!( + self.log, + "Attempting re-org due to weak head"; + "weak_head" => ?canonical_head, + "parent" => ?re_org_parent_block, + "head_weight" => proposer_head.head_node.weight, + "threshold_weight" => proposer_head.re_org_weight_threshold + ); + + Some(pre_state) + } + + /// Get the proposer index and `prev_randao` value for a proposal at slot `proposal_slot`. + /// + /// The `proposer_head` may be the head block of `cached_head` or its parent. An error will + /// be returned for any other value. 
+ pub fn get_pre_payload_attributes( + &self, + proposal_slot: Slot, + proposer_head: Hash256, + cached_head: &CachedHead, + ) -> Result, Error> { + let proposal_epoch = proposal_slot.epoch(T::EthSpec::slots_per_epoch()); + + let head_block_root = cached_head.head_block_root(); + let parent_block_root = cached_head.parent_block_root(); + + // The proposer head must be equal to the canonical head or its parent. + if proposer_head != head_block_root && proposer_head != parent_block_root { + warn!( + self.log, + "Unable to compute payload attributes"; + "block_root" => ?proposer_head, + "head_block_root" => ?head_block_root, + ); + return Ok(None); + } + + // Compute the proposer index. + let head_epoch = cached_head.head_slot().epoch(T::EthSpec::slots_per_epoch()); + let shuffling_decision_root = if head_epoch == proposal_epoch { + cached_head + .snapshot + .beacon_state + .proposer_shuffling_decision_root(proposer_head)? + } else { + proposer_head + }; + let cached_proposer = self + .beacon_proposer_cache + .lock() + .get_slot::(shuffling_decision_root, proposal_slot); + let proposer_index = if let Some(proposer) = cached_proposer { + proposer.index as u64 + } else { + if head_epoch + 2 < proposal_epoch { + warn!( + self.log, + "Skipping proposer preparation"; + "msg" => "this is a non-critical issue that can happen on unhealthy nodes or \ + networks.", + "proposal_epoch" => proposal_epoch, + "head_epoch" => head_epoch, + ); + + // Don't skip the head forward more than two epochs. This avoids burdening an + // unhealthy node. + // + // Although this node might miss out on preparing for a proposal, they should still + // be able to propose. This will prioritise beacon chain health over efficient + // packing of execution blocks. 
+ return Ok(None); + } + + let (proposers, decision_root, _, fork) = + compute_proposer_duties_from_head(proposal_epoch, self)?; + + let proposer_offset = (proposal_slot % T::EthSpec::slots_per_epoch()).as_usize(); + let proposer = *proposers + .get(proposer_offset) + .ok_or(BeaconChainError::NoProposerForSlot(proposal_slot))?; + + self.beacon_proposer_cache.lock().insert( + proposal_epoch, + decision_root, + proposers, + fork, + )?; + + // It's possible that the head changes whilst computing these duties. If so, abandon + // this routine since the change of head would have also spawned another instance of + // this routine. + // + // Exit now, after updating the cache. + if decision_root != shuffling_decision_root { + warn!( + self.log, + "Head changed during proposer preparation"; + ); + return Ok(None); + } + + proposer as u64 + }; + + // Get the `prev_randao` value. + let prev_randao = if proposer_head == parent_block_root { + cached_head.parent_random() + } else { + cached_head.head_random() + }?; + + Ok(Some(PrePayloadAttributes { + proposer_index, + prev_randao, + })) + } + + /// Determine whether a fork choice update to the execution layer should be overridden. + /// + /// This is *only* necessary when proposer re-orgs are enabled, because we have to prevent the + /// execution layer from enshrining the block we want to re-org as the head. + /// + /// This function uses heuristics that align quite closely but not exactly with the re-org + /// conditions set out in `get_state_for_re_org` and `get_proposer_head`. The differences are + /// documented below. 
+ fn overridden_forkchoice_update_params( + &self, + canonical_forkchoice_params: ForkchoiceUpdateParameters, + ) -> Result { + self.overridden_forkchoice_update_params_or_failure_reason(&canonical_forkchoice_params) + .or_else(|e| match e { + ProposerHeadError::DoNotReOrg(reason) => { + trace!( + self.log, + "Not suppressing fork choice update"; + "reason" => %reason, + ); + Ok(canonical_forkchoice_params) + } + ProposerHeadError::Error(e) => Err(e), + }) + } + + fn overridden_forkchoice_update_params_or_failure_reason( + &self, + canonical_forkchoice_params: &ForkchoiceUpdateParameters, + ) -> Result> { + let _timer = metrics::start_timer(&metrics::FORK_CHOICE_OVERRIDE_FCU_TIMES); + + // Never override if proposer re-orgs are disabled. + let re_org_threshold = self + .config + .re_org_threshold + .ok_or(DoNotReOrg::ReOrgsDisabled)?; + + let head_block_root = canonical_forkchoice_params.head_root; + + // Perform initial checks and load the relevant info from fork choice. + let info = self + .canonical_head + .fork_choice_read_lock() + .get_preliminary_proposer_head( + head_block_root, + re_org_threshold, + self.config.re_org_max_epochs_since_finalization, + ) + .map_err(|e| e.map_inner_error(Error::ProposerHeadForkChoiceError))?; + + // The slot of our potential re-org block is always 1 greater than the head block because we + // only attempt single-slot re-orgs. + let head_slot = info.head_node.slot; + let re_org_block_slot = head_slot + 1; + let fork_choice_slot = info.current_slot; + + // If a re-orging proposal isn't made by the `max_re_org_slot_delay` then we give up + // and allow the fork choice update for the canonical head through so that we may attest + // correctly. 
+ let current_slot_ok = if head_slot == fork_choice_slot { + true + } else if re_org_block_slot == fork_choice_slot { + self.slot_clock + .start_of(re_org_block_slot) + .and_then(|slot_start| { + let now = self.slot_clock.now_duration()?; + let slot_delay = now.saturating_sub(slot_start); + Some(slot_delay <= max_re_org_slot_delay(self.spec.seconds_per_slot)) + }) + .unwrap_or(false) + } else { + false + }; + if !current_slot_ok { + return Err(DoNotReOrg::HeadDistance.into()); + } + + // Only attempt a re-org if we have a proposer registered for the re-org slot. + let proposing_at_re_org_slot = { + // The proposer shuffling has the same decision root as the next epoch attestation + // shuffling. We know our re-org block is not on the epoch boundary, so it has the + // same proposer shuffling as the head (but not necessarily the parent which may lie + // in the previous epoch). + let shuffling_decision_root = info + .head_node + .next_epoch_shuffling_id + .shuffling_decision_block; + let proposer_index = self + .beacon_proposer_cache + .lock() + .get_slot::(shuffling_decision_root, re_org_block_slot) + .ok_or_else(|| { + debug!( + self.log, + "Fork choice override proposer shuffling miss"; + "slot" => re_org_block_slot, + "decision_root" => ?shuffling_decision_root, + ); + DoNotReOrg::NotProposing + })? + .index as u64; + + self.execution_layer + .as_ref() + .ok_or(ProposerHeadError::Error(Error::ExecutionLayerMissing))? + .has_proposer_preparation_data_blocking(proposer_index) + }; + if !proposing_at_re_org_slot { + return Err(DoNotReOrg::NotProposing.into()); + } + + // If the current slot is already equal to the proposal slot (or we are in the tail end of + // the prior slot), then check the actual weight of the head against the re-org threshold. 
+ let head_weak = if fork_choice_slot == re_org_block_slot { + info.head_node.weight < info.re_org_weight_threshold + } else { + true + }; + if !head_weak { + return Err(DoNotReOrg::HeadNotWeak { + head_weight: info.head_node.weight, + re_org_weight_threshold: info.re_org_weight_threshold, + } + .into()); + } + + // Check that the head block arrived late and is vulnerable to a re-org. This check is only + // a heuristic compared to the proper weight check in `get_state_for_re_org`, the reason + // being that we may have only *just* received the block and not yet processed any + // attestations for it. We also can't dequeue attestations for the block during the + // current slot, which would be necessary for determining its weight. + let head_block_late = + self.block_observed_after_attestation_deadline(head_block_root, head_slot); + if !head_block_late { + return Err(DoNotReOrg::HeadNotLate.into()); + } + + let parent_head_hash = info.parent_node.execution_status.block_hash(); + let forkchoice_update_params = ForkchoiceUpdateParameters { + head_root: info.parent_node.root, + head_hash: parent_head_hash, + justified_hash: canonical_forkchoice_params.justified_hash, + finalized_hash: canonical_forkchoice_params.finalized_hash, + }; + + debug!( + self.log, + "Fork choice update overridden"; + "canonical_head" => ?head_block_root, + "override" => ?info.parent_node.root, + "slot" => fork_choice_slot, + ); + + Ok(forkchoice_update_params) + } + + /// Check if the block with `block_root` was observed after the attestation deadline of `slot`. 
+ fn block_observed_after_attestation_deadline(&self, block_root: Hash256, slot: Slot) -> bool { + let block_delays = self.block_times_cache.read().get_block_delays( + block_root, + self.slot_clock + .start_of(slot) + .unwrap_or_else(|| Duration::from_secs(0)), + ); + block_delays.observed.map_or(false, |delay| { + delay > self.slot_clock.unagg_attestation_production_delay() + }) + } + /// Produce a block for some `slot` upon the given `state`. /// /// Typically the `self.produce_block()` function should be used, instead of calling this @@ -4085,17 +4512,13 @@ impl BeaconChain { /// The `PayloadAttributes` are used by the EL to give it a look-ahead for preparing an optimal /// set of transactions for a new `ExecutionPayload`. /// - /// This function will result in a call to `forkchoiceUpdated` on the EL if: - /// - /// 1. We're in the tail-end of the slot (as defined by PAYLOAD_PREPARATION_LOOKAHEAD_FACTOR) - /// 2. The head block is one slot (or less) behind the prepare slot (e.g., we're preparing for - /// the next slot and the block at the current slot is already known). + /// This function will result in a call to `forkchoiceUpdated` on the EL if we're in the + /// tail-end of the slot (as defined by `self.config.prepare_payload_lookahead`). pub async fn prepare_beacon_proposer( self: &Arc, current_slot: Slot, ) -> Result<(), Error> { let prepare_slot = current_slot + 1; - let prepare_epoch = prepare_slot.epoch(T::EthSpec::slots_per_epoch()); // There's no need to run the proposer preparation routine before the bellatrix fork. if self.slot_is_prior_to_bellatrix(prepare_slot) { @@ -4113,158 +4536,99 @@ impl BeaconChain { return Ok(()); } - // Atomically read some values from the canonical head, whilst avoiding holding the cached - // head `Arc` any longer than necessary. + // Load the cached head and its forkchoice update parameters. // // Use a blocking task since blocking the core executor on the canonical head read lock can // block the core tokio executor. 
let chain = self.clone(); - let (head_slot, head_root, head_decision_root, head_random, forkchoice_update_params) = - self.spawn_blocking_handle( + let maybe_prep_data = self + .spawn_blocking_handle( move || { let cached_head = chain.canonical_head.cached_head(); - let head_block_root = cached_head.head_block_root(); - let decision_root = cached_head - .snapshot - .beacon_state - .proposer_shuffling_decision_root(head_block_root)?; - Ok::<_, Error>(( - cached_head.head_slot(), - head_block_root, - decision_root, - cached_head.head_random()?, - cached_head.forkchoice_update_parameters(), - )) + + // Don't bother with proposer prep if the head is more than + // `PREPARE_PROPOSER_HISTORIC_EPOCHS` prior to the current slot. + // + // This prevents the routine from running during sync. + let head_slot = cached_head.head_slot(); + if head_slot + T::EthSpec::slots_per_epoch() * PREPARE_PROPOSER_HISTORIC_EPOCHS + < current_slot + { + debug!( + chain.log, + "Head too old for proposer prep"; + "head_slot" => head_slot, + "current_slot" => current_slot, + ); + return Ok(None); + } + + let canonical_fcu_params = cached_head.forkchoice_update_parameters(); + let fcu_params = + chain.overridden_forkchoice_update_params(canonical_fcu_params)?; + let pre_payload_attributes = chain.get_pre_payload_attributes( + prepare_slot, + fcu_params.head_root, + &cached_head, + )?; + Ok::<_, Error>(Some((fcu_params, pre_payload_attributes))) }, - "prepare_beacon_proposer_fork_choice_read", + "prepare_beacon_proposer_head_read", ) .await??; - let head_epoch = head_slot.epoch(T::EthSpec::slots_per_epoch()); - // Don't bother with proposer prep if the head is more than - // `PREPARE_PROPOSER_HISTORIC_EPOCHS` prior to the current slot. - // - // This prevents the routine from running during sync. 
- if head_slot + T::EthSpec::slots_per_epoch() * PREPARE_PROPOSER_HISTORIC_EPOCHS - < current_slot - { - debug!( - self.log, - "Head too old for proposer prep"; - "head_slot" => head_slot, - "current_slot" => current_slot, - ); - return Ok(()); - } - - // Ensure that the shuffling decision root is correct relative to the epoch we wish to - // query. - let shuffling_decision_root = if head_epoch == prepare_epoch { - head_decision_root - } else { - head_root - }; - - // Read the proposer from the proposer cache. - let cached_proposer = self - .beacon_proposer_cache - .lock() - .get_slot::(shuffling_decision_root, prepare_slot); - let proposer = if let Some(proposer) = cached_proposer { - proposer.index - } else { - if head_epoch + 2 < prepare_epoch { - warn!( - self.log, - "Skipping proposer preparation"; - "msg" => "this is a non-critical issue that can happen on unhealthy nodes or \ - networks.", - "prepare_epoch" => prepare_epoch, - "head_epoch" => head_epoch, - ); - - // Don't skip the head forward more than two epochs. This avoids burdening an - // unhealthy node. - // - // Although this node might miss out on preparing for a proposal, they should still - // be able to propose. This will prioritise beacon chain health over efficient - // packing of execution blocks. - return Ok(()); - } - - let (proposers, decision_root, _, fork) = - compute_proposer_duties_from_head(prepare_epoch, self)?; - - let proposer_index = prepare_slot.as_usize() % (T::EthSpec::slots_per_epoch() as usize); - let proposer = *proposers - .get(proposer_index) - .ok_or(BeaconChainError::NoProposerForSlot(prepare_slot))?; - - self.beacon_proposer_cache.lock().insert( - prepare_epoch, - decision_root, - proposers, - fork, - )?; - - // It's possible that the head changes whilst computing these duties. If so, abandon - // this routine since the change of head would have also spawned another instance of - // this routine. - // - // Exit now, after updating the cache. 
- if decision_root != shuffling_decision_root { - warn!( - self.log, - "Head changed during proposer preparation"; - ); + let (forkchoice_update_params, pre_payload_attributes) = + if let Some((fcu, Some(pre_payload))) = maybe_prep_data { + (fcu, pre_payload) + } else { + // Appropriate log messages have already been logged above and in + // `get_pre_payload_attributes`. return Ok(()); - } - - proposer - }; + }; // If the execution layer doesn't have any proposer data for this validator then we assume // it's not connected to this BN and no action is required. + let proposer = pre_payload_attributes.proposer_index; if !execution_layer - .has_proposer_preparation_data(proposer as u64) + .has_proposer_preparation_data(proposer) .await { return Ok(()); } + let head_root = forkchoice_update_params.head_root; let payload_attributes = PayloadAttributes { timestamp: self .slot_clock .start_of(prepare_slot) .ok_or(Error::InvalidSlot(prepare_slot))? .as_secs(), - prev_randao: head_random, - suggested_fee_recipient: execution_layer - .get_suggested_fee_recipient(proposer as u64) - .await, + prev_randao: pre_payload_attributes.prev_randao, + suggested_fee_recipient: execution_layer.get_suggested_fee_recipient(proposer).await, }; debug!( self.log, "Preparing beacon proposer"; "payload_attributes" => ?payload_attributes, - "head_root" => ?head_root, "prepare_slot" => prepare_slot, "validator" => proposer, + "parent_root" => ?head_root, ); let already_known = execution_layer - .insert_proposer(prepare_slot, head_root, proposer as u64, payload_attributes) + .insert_proposer(prepare_slot, head_root, proposer, payload_attributes) .await; + // Only push a log to the user if this is the first time we've seen this proposer for this // slot. 
if !already_known { info!( self.log, "Prepared beacon proposer"; - "already_known" => already_known, "prepare_slot" => prepare_slot, "validator" => proposer, + "parent_root" => ?head_root, ); } @@ -4286,27 +4650,22 @@ impl BeaconChain { return Ok(()); }; - // If either of the following are true, send a fork-choice update message to the - // EL: - // - // 1. We're in the tail-end of the slot (as defined by - // PAYLOAD_PREPARATION_LOOKAHEAD_FACTOR) - // 2. The head block is one slot (or less) behind the prepare slot (e.g., we're - // preparing for the next slot and the block at the current slot is already - // known). - if till_prepare_slot - <= self.slot_clock.slot_duration() / PAYLOAD_PREPARATION_LOOKAHEAD_FACTOR - || head_slot + 1 >= prepare_slot - { + // If we are close enough to the proposal slot, send an fcU, which will have payload + // attributes filled in by the execution layer cache we just primed. + if till_prepare_slot <= self.config.prepare_payload_lookahead { debug!( self.log, - "Pushing update to prepare proposer"; + "Sending forkchoiceUpdate for proposer prep"; "till_prepare_slot" => ?till_prepare_slot, "prepare_slot" => prepare_slot ); - self.update_execution_engine_forkchoice(current_slot, forkchoice_update_params) - .await?; + self.update_execution_engine_forkchoice( + current_slot, + forkchoice_update_params, + OverrideForkchoiceUpdate::AlreadyApplied, + ) + .await?; } Ok(()) @@ -4315,7 +4674,8 @@ impl BeaconChain { pub async fn update_execution_engine_forkchoice( self: &Arc, current_slot: Slot, - params: ForkchoiceUpdateParameters, + input_params: ForkchoiceUpdateParameters, + override_forkchoice_update: OverrideForkchoiceUpdate, ) -> Result<(), Error> { let next_slot = current_slot + 1; @@ -4337,6 +4697,19 @@ impl BeaconChain { .as_ref() .ok_or(Error::ExecutionLayerMissing)?; + // Determine whether to override the forkchoiceUpdated message if we want to re-org + // the current head at the next slot. 
+ let params = if override_forkchoice_update == OverrideForkchoiceUpdate::Yes { + let chain = self.clone(); + self.spawn_blocking_handle( + move || chain.overridden_forkchoice_update_params(input_params), + "update_execution_engine_forkchoice_override", + ) + .await?? + } else { + input_params + }; + // Take the global lock for updating the execution engine fork choice. // // Whilst holding this lock we must: diff --git a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs index 5cba5f3c3bb..0b789b8b615 100644 --- a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs +++ b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs @@ -7,6 +7,8 @@ use crate::{metrics, BeaconSnapshot}; use derivative::Derivative; use fork_choice::ForkChoiceStore; +use proto_array::JustifiedBalances; +use safe_arith::ArithError; use ssz_derive::{Decode, Encode}; use std::collections::BTreeSet; use std::marker::PhantomData; @@ -31,6 +33,7 @@ pub enum Error { MissingState(Hash256), InvalidPersistedBytes(ssz::DecodeError), BeaconStateError(BeaconStateError), + Arith(ArithError), } impl From for Error { @@ -39,27 +42,15 @@ impl From for Error { } } +impl From for Error { + fn from(e: ArithError) -> Self { + Error::Arith(e) + } +} + /// The number of validator balance sets that are cached within `BalancesCache`. const MAX_BALANCE_CACHE_SIZE: usize = 4; -/// Returns the effective balances for every validator in the given `state`. -/// -/// Any validator who is not active in the epoch of the given `state` is assigned a balance of -/// zero. 
-pub fn get_effective_balances(state: &BeaconState) -> Vec { - state - .validators() - .iter() - .map(|validator| { - if validator.is_active_at(state.current_epoch()) { - validator.effective_balance - } else { - 0 - } - }) - .collect() -} - #[superstruct( variants(V8), variant_attributes(derive(PartialEq, Clone, Debug, Encode, Decode)), @@ -113,7 +104,7 @@ impl BalancesCache { let item = CacheItem { block_root: epoch_boundary_root, epoch, - balances: get_effective_balances(state), + balances: JustifiedBalances::from_justified_state(state)?.effective_balances, }; if self.items.len() == MAX_BALANCE_CACHE_SIZE { @@ -152,7 +143,7 @@ pub struct BeaconForkChoiceStore, Cold: ItemStore< time: Slot, finalized_checkpoint: Checkpoint, justified_checkpoint: Checkpoint, - justified_balances: Vec, + justified_balances: JustifiedBalances, best_justified_checkpoint: Checkpoint, unrealized_justified_checkpoint: Checkpoint, unrealized_finalized_checkpoint: Checkpoint, @@ -181,7 +172,7 @@ where pub fn get_forkchoice_store( store: Arc>, anchor: &BeaconSnapshot, - ) -> Self { + ) -> Result { let anchor_state = &anchor.beacon_state; let mut anchor_block_header = anchor_state.latest_block_header().clone(); if anchor_block_header.state_root == Hash256::zero() { @@ -194,13 +185,14 @@ where root: anchor_root, }; let finalized_checkpoint = justified_checkpoint; + let justified_balances = JustifiedBalances::from_justified_state(anchor_state)?; - Self { + Ok(Self { store, balances_cache: <_>::default(), time: anchor_state.slot(), justified_checkpoint, - justified_balances: anchor_state.balances().clone().into(), + justified_balances, finalized_checkpoint, best_justified_checkpoint: justified_checkpoint, unrealized_justified_checkpoint: justified_checkpoint, @@ -208,7 +200,7 @@ where proposer_boost_root: Hash256::zero(), equivocating_indices: BTreeSet::new(), _phantom: PhantomData, - } + }) } /// Save the current state of `Self` to a `PersistedForkChoiceStore` which can be stored to the @@ 
-219,7 +211,7 @@ where time: self.time, finalized_checkpoint: self.finalized_checkpoint, justified_checkpoint: self.justified_checkpoint, - justified_balances: self.justified_balances.clone(), + justified_balances: self.justified_balances.effective_balances.clone(), best_justified_checkpoint: self.best_justified_checkpoint, unrealized_justified_checkpoint: self.unrealized_justified_checkpoint, unrealized_finalized_checkpoint: self.unrealized_finalized_checkpoint, @@ -233,13 +225,15 @@ where persisted: PersistedForkChoiceStore, store: Arc>, ) -> Result { + let justified_balances = + JustifiedBalances::from_effective_balances(persisted.justified_balances)?; Ok(Self { store, balances_cache: persisted.balances_cache, time: persisted.time, finalized_checkpoint: persisted.finalized_checkpoint, justified_checkpoint: persisted.justified_checkpoint, - justified_balances: persisted.justified_balances, + justified_balances, best_justified_checkpoint: persisted.best_justified_checkpoint, unrealized_justified_checkpoint: persisted.unrealized_justified_checkpoint, unrealized_finalized_checkpoint: persisted.unrealized_finalized_checkpoint, @@ -279,7 +273,7 @@ where &self.justified_checkpoint } - fn justified_balances(&self) -> &[u64] { + fn justified_balances(&self) -> &JustifiedBalances { &self.justified_balances } @@ -314,8 +308,9 @@ where self.justified_checkpoint.root, self.justified_checkpoint.epoch, ) { + // NOTE: could avoid this re-calculation by introducing a `PersistedCacheItem`. metrics::inc_counter(&metrics::BALANCES_CACHE_HITS); - self.justified_balances = balances; + self.justified_balances = JustifiedBalances::from_effective_balances(balances)?; } else { metrics::inc_counter(&metrics::BALANCES_CACHE_MISSES); let justified_block = self @@ -332,7 +327,7 @@ where .map_err(Error::FailedToReadState)? 
.ok_or_else(|| Error::MissingState(justified_block.state_root()))?; - self.justified_balances = get_effective_balances(&state); + self.justified_balances = JustifiedBalances::from_justified_state(&state)?; } Ok(()) diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index f5bd85dec28..eff50701d7a 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -22,6 +22,7 @@ use fork_choice::{ForkChoice, ResetPayloadStatuses}; use futures::channel::mpsc::Sender; use operation_pool::{OperationPool, PersistedOperationPool}; use parking_lot::RwLock; +use proto_array::ReOrgThreshold; use slasher::Slasher; use slog::{crit, error, info, Logger}; use slot_clock::{SlotClock, TestingSlotClock}; @@ -31,8 +32,8 @@ use std::time::Duration; use store::{Error as StoreError, HotColdDB, ItemStore, KeyValueStoreOp}; use task_executor::{ShutdownReason, TaskExecutor}; use types::{ - BeaconBlock, BeaconState, ChainSpec, Checkpoint, EthSpec, Graffiti, Hash256, PublicKeyBytes, - Signature, SignedBeaconBlock, Slot, + BeaconBlock, BeaconState, ChainSpec, Checkpoint, Epoch, EthSpec, Graffiti, Hash256, + PublicKeyBytes, Signature, SignedBeaconBlock, Slot, }; /// An empty struct used to "witness" all the `BeaconChainTypes` traits. It has no user-facing @@ -159,6 +160,21 @@ where self } + /// Sets the proposer re-org threshold. + pub fn proposer_re_org_threshold(mut self, threshold: Option) -> Self { + self.chain_config.re_org_threshold = threshold; + self + } + + /// Sets the proposer re-org max epochs since finalization. + pub fn proposer_re_org_max_epochs_since_finalization( + mut self, + epochs_since_finalization: Epoch, + ) -> Self { + self.chain_config.re_org_max_epochs_since_finalization = epochs_since_finalization; + self + } + /// Sets the store (database). /// /// Should generally be called early in the build chain. 
@@ -358,7 +374,8 @@ where let (genesis, updated_builder) = self.set_genesis_state(beacon_state)?; self = updated_builder; - let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &genesis); + let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &genesis) + .map_err(|e| format!("Unable to initialize fork choice store: {e:?}"))?; let current_slot = None; let fork_choice = ForkChoice::from_anchor( @@ -476,7 +493,8 @@ where beacon_state: weak_subj_state, }; - let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &snapshot); + let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &snapshot) + .map_err(|e| format!("Unable to initialize fork choice store: {e:?}"))?; let current_slot = Some(snapshot.beacon_block.slot()); let fork_choice = ForkChoice::from_anchor( diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index c9bd6db0e67..dd64e02edf7 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -34,7 +34,8 @@ use crate::persisted_fork_choice::PersistedForkChoice; use crate::{ beacon_chain::{ - BeaconForkChoice, BeaconStore, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT, FORK_CHOICE_DB_KEY, + BeaconForkChoice, BeaconStore, OverrideForkchoiceUpdate, + BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT, FORK_CHOICE_DB_KEY, }, block_times_cache::BlockTimesCache, events::ServerSentEventHandler, @@ -114,6 +115,11 @@ impl CachedHead { self.snapshot.beacon_block_root } + /// Returns the root of the parent of the head block. + pub fn parent_block_root(&self) -> Hash256 { + self.snapshot.beacon_block.parent_root() + } + /// Returns root of the `BeaconState` at the head of the beacon chain. /// /// ## Note @@ -146,6 +152,21 @@ impl CachedHead { Ok(root) } + /// Returns the randao mix for the parent of the block at the head of the chain. + /// + /// This is useful for re-orging the current head. 
The parent's RANDAO value is read from + /// the head's execution payload because it is unavailable in the beacon state's RANDAO mixes + /// array after being overwritten by the head block's RANDAO mix. + /// + /// This will error if the head block is not execution-enabled (post Bellatrix). + pub fn parent_random(&self) -> Result { + self.snapshot + .beacon_block + .message() + .execution_payload() + .map(|payload| payload.prev_randao()) + } + /// Returns the active validator count for the current epoch of the head state. /// /// Should only return `None` if the caches have not been built on the head state (this should @@ -765,6 +786,7 @@ impl BeaconChain { new_cached_head: &CachedHead, new_head_proto_block: ProtoBlock, ) -> Result<(), Error> { + let _timer = metrics::start_timer(&metrics::FORK_CHOICE_AFTER_NEW_HEAD_TIMES); let old_snapshot = &old_cached_head.snapshot; let new_snapshot = &new_cached_head.snapshot; let new_head_is_optimistic = new_head_proto_block @@ -902,6 +924,7 @@ impl BeaconChain { new_view: ForkChoiceView, finalized_proto_block: ProtoBlock, ) -> Result<(), Error> { + let _timer = metrics::start_timer(&metrics::FORK_CHOICE_AFTER_FINALIZATION_TIMES); let new_snapshot = &new_cached_head.snapshot; let finalized_block_is_optimistic = finalized_proto_block .execution_status @@ -1124,7 +1147,11 @@ fn spawn_execution_layer_updates( } if let Err(e) = chain - .update_execution_engine_forkchoice(current_slot, forkchoice_update_params) + .update_execution_engine_forkchoice( + current_slot, + forkchoice_update_params, + OverrideForkchoiceUpdate::Yes, + ) .await { crit!( diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index 286cc17a963..c4c6966732d 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -1,9 +1,18 @@ -pub use proto_array::CountUnrealizedFull; +pub use proto_array::{CountUnrealizedFull, ReOrgThreshold}; use 
serde_derive::{Deserialize, Serialize}; -use types::Checkpoint; +use std::time::Duration; +use types::{Checkpoint, Epoch}; +pub const DEFAULT_RE_ORG_THRESHOLD: ReOrgThreshold = ReOrgThreshold(20); +pub const DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION: Epoch = Epoch::new(2); pub const DEFAULT_FORK_CHOICE_BEFORE_PROPOSAL_TIMEOUT: u64 = 250; +/// Default fraction of a slot lookahead for payload preparation (12/3 = 4 seconds on mainnet). +pub const DEFAULT_PREPARE_PAYLOAD_LOOKAHEAD_FACTOR: u32 = 3; + +/// Fraction of a slot lookahead for fork choice in the state advance timer (500ms on mainnet). +pub const FORK_CHOICE_LOOKAHEAD_FACTOR: u32 = 24; + #[derive(Debug, PartialEq, Eq, Clone, Deserialize, Serialize)] pub struct ChainConfig { /// Maximum number of slots to skip when importing a consensus message (e.g., block, @@ -21,6 +30,10 @@ pub struct ChainConfig { pub enable_lock_timeouts: bool, /// The max size of a message that can be sent over the network. pub max_network_size: usize, + /// Maximum percentage of committee weight at which to attempt re-orging the canonical head. + pub re_org_threshold: Option, + /// Maximum number of epochs since finalization for attempting a proposer re-org. + pub re_org_max_epochs_since_finalization: Epoch, /// Number of milliseconds to wait for fork choice before proposing a block. /// /// If set to 0 then block proposal will not wait for fork choice at all. @@ -47,6 +60,11 @@ pub struct ChainConfig { pub count_unrealized_full: CountUnrealizedFull, /// Optionally set timeout for calls to checkpoint sync endpoint. pub checkpoint_sync_url_timeout: u64, + /// The offset before the start of a proposal slot at which payload attributes should be sent. + /// + /// Low values are useful for execution engines which don't improve their payload after the + /// first call, and high values are useful for ensuring the EL is given ample notice. 
+ pub prepare_payload_lookahead: Duration, } impl Default for ChainConfig { @@ -57,6 +75,8 @@ impl Default for ChainConfig { reconstruct_historic_states: false, enable_lock_timeouts: true, max_network_size: 10 * 1_048_576, // 10M + re_org_threshold: Some(DEFAULT_RE_ORG_THRESHOLD), + re_org_max_epochs_since_finalization: DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, fork_choice_before_proposal_timeout_ms: DEFAULT_FORK_CHOICE_BEFORE_PROPOSAL_TIMEOUT, // Builder fallback configs that are set in `clap` will override these. builder_fallback_skips: 3, @@ -68,6 +88,7 @@ impl Default for ChainConfig { paranoid_block_proposal: false, count_unrealized_full: CountUnrealizedFull::default(), checkpoint_sync_url_timeout: 60, + prepare_payload_lookahead: Duration::from_secs(4), } } } diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 704cba489d2..17f58b223f4 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -204,6 +204,7 @@ pub enum BeaconChainError { MissingPersistedForkChoice, CommitteePromiseFailed(oneshot_broadcast::Error), MaxCommitteePromises(usize), + ProposerHeadForkChoiceError(fork_choice::Error), } easy_from_to!(SlotProcessingError, BeaconChainError); @@ -234,6 +235,7 @@ pub enum BlockProductionError { UnableToProduceAtSlot(Slot), SlotProcessingError(SlotProcessingError), BlockProcessingError(BlockProcessingError), + ForkChoiceError(ForkChoiceError), Eth1ChainError(Eth1ChainError), BeaconStateError(BeaconStateError), StateAdvanceError(StateAdvanceError), @@ -252,7 +254,6 @@ pub enum BlockProductionError { FailedToReadFinalizedBlock(store::Error), MissingFinalizedBlock(Hash256), BlockTooLarge(usize), - ForkChoiceError(BeaconChainError), ShuttingDown, MissingSyncAggregate, MissingExecutionPayload, @@ -265,3 +266,4 @@ easy_from_to!(BeaconStateError, BlockProductionError); easy_from_to!(SlotProcessingError, BlockProductionError); easy_from_to!(Eth1ChainError, 
BlockProductionError); easy_from_to!(StateAdvanceError, BlockProductionError); +easy_from_to!(ForkChoiceError, BlockProductionError); diff --git a/beacon_node/beacon_chain/src/fork_revert.rs b/beacon_node/beacon_chain/src/fork_revert.rs index 3d48dfd8f60..6d5b5ddc4ae 100644 --- a/beacon_node/beacon_chain/src/fork_revert.rs +++ b/beacon_node/beacon_chain/src/fork_revert.rs @@ -147,7 +147,8 @@ pub fn reset_fork_choice_to_finalization, Cold: It beacon_state: finalized_state, }; - let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store.clone(), &finalized_snapshot); + let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store.clone(), &finalized_snapshot) + .map_err(|e| format!("Unable to reset fork choice store for revert: {e:?}"))?; let mut fork_choice = ForkChoice::from_anchor( fc_store, diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index a55532ac12f..ae1c5e4b766 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -48,8 +48,8 @@ pub mod validator_pubkey_cache; pub use self::beacon_chain::{ AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult, - CountUnrealized, ForkChoiceError, ProduceBlockVerification, StateSkipConfig, WhenSlotSkipped, - INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, + CountUnrealized, ForkChoiceError, OverrideForkchoiceUpdate, ProduceBlockVerification, + StateSkipConfig, WhenSlotSkipped, INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, MAXIMUM_GOSSIP_CLOCK_DISPARITY, }; pub use self::beacon_snapshot::BeaconSnapshot; diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index c681570b110..b52c4258fe7 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -77,6 +77,11 @@ lazy_static! 
{ "beacon_block_processing_attestation_observation_seconds", "Time spent hashing and remembering all the attestations in the block" ); + pub static ref BLOCK_PROCESSING_FORK_CHOICE: Result = try_create_histogram_with_buckets( + "beacon_block_processing_fork_choice_seconds", + "Time spent running fork choice's `get_head` during block import", + exponential_buckets(1e-3, 2.0, 8) + ); pub static ref BLOCK_SYNC_AGGREGATE_SET_BITS: Result = try_create_int_gauge( "block_sync_aggregate_set_bits", "The number of true bits in the last sync aggregate in a block" @@ -99,6 +104,11 @@ lazy_static! { "beacon_block_production_fork_choice_seconds", "Time taken to run fork choice before block production" ); + pub static ref BLOCK_PRODUCTION_GET_PROPOSER_HEAD_TIMES: Result = try_create_histogram_with_buckets( + "beacon_block_production_get_proposer_head_times", + "Time taken for fork choice to compute the proposer head before block production", + exponential_buckets(1e-3, 2.0, 8) + ); pub static ref BLOCK_PRODUCTION_STATE_LOAD_TIMES: Result = try_create_histogram( "beacon_block_production_state_load_seconds", "Time taken to load the base state for block production" @@ -322,10 +332,26 @@ lazy_static! 
{ "beacon_reorgs_total", "Count of occasions fork choice has switched to a different chain" ); - pub static ref FORK_CHOICE_TIMES: Result = - try_create_histogram("beacon_fork_choice_seconds", "Full runtime of fork choice"); - pub static ref FORK_CHOICE_FIND_HEAD_TIMES: Result = - try_create_histogram("beacon_fork_choice_find_head_seconds", "Full runtime of fork choice find_head function"); + pub static ref FORK_CHOICE_TIMES: Result = try_create_histogram_with_buckets( + "beacon_fork_choice_seconds", + "Full runtime of fork choice", + linear_buckets(10e-3, 20e-3, 10) + ); + pub static ref FORK_CHOICE_OVERRIDE_FCU_TIMES: Result = try_create_histogram_with_buckets( + "beacon_fork_choice_override_fcu_seconds", + "Time taken to compute the optional forkchoiceUpdated override", + exponential_buckets(1e-3, 2.0, 8) + ); + pub static ref FORK_CHOICE_AFTER_NEW_HEAD_TIMES: Result = try_create_histogram_with_buckets( + "beacon_fork_choice_after_new_head_seconds", + "Time taken to run `after_new_head`", + exponential_buckets(1e-3, 2.0, 10) + ); + pub static ref FORK_CHOICE_AFTER_FINALIZATION_TIMES: Result = try_create_histogram_with_buckets( + "beacon_fork_choice_after_finalization_seconds", + "Time taken to run `after_finalization`", + exponential_buckets(1e-3, 2.0, 10) + ); pub static ref FORK_CHOICE_PROCESS_BLOCK_TIMES: Result = try_create_histogram( "beacon_fork_choice_process_block_seconds", "Time taken to add a block and all attestations to fork choice" diff --git a/beacon_node/beacon_chain/src/proposer_prep_service.rs b/beacon_node/beacon_chain/src/proposer_prep_service.rs index 9cd177b3409..140a9659fce 100644 --- a/beacon_node/beacon_chain/src/proposer_prep_service.rs +++ b/beacon_node/beacon_chain/src/proposer_prep_service.rs @@ -5,13 +5,9 @@ use std::sync::Arc; use task_executor::TaskExecutor; use tokio::time::sleep; -/// At 12s slot times, the means that the payload preparation routine will run 4s before the start -/// of each slot (`12 / 3 = 4`). 
-pub const PAYLOAD_PREPARATION_LOOKAHEAD_FACTOR: u32 = 3; - /// Spawns a routine which ensures the EL is provided advance notice of any block producers. /// -/// This routine will run once per slot, at `slot_duration / PAYLOAD_PREPARATION_LOOKAHEAD_FACTOR` +/// This routine will run once per slot, at `chain.prepare_payload_lookahead()` /// before the start of each slot. /// /// The service will not be started if there is no `execution_layer` on the `chain`. @@ -38,8 +34,8 @@ async fn proposer_prep_service( loop { match chain.slot_clock.duration_to_next_slot() { Some(duration) => { - let additional_delay = slot_duration - - chain.slot_clock.slot_duration() / PAYLOAD_PREPARATION_LOOKAHEAD_FACTOR; + let additional_delay = + slot_duration.saturating_sub(chain.config.prepare_payload_lookahead); sleep(duration + additional_delay).await; debug!( @@ -65,14 +61,11 @@ async fn proposer_prep_service( }, "proposer_prep_update", ); - - continue; } None => { error!(chain.log, "Failed to read slot clock"); // If we can't read the slot clock, just wait another slot. sleep(slot_duration).await; - continue; } }; } diff --git a/beacon_node/beacon_chain/src/snapshot_cache.rs b/beacon_node/beacon_chain/src/snapshot_cache.rs index 40b73451cb0..d2846c08569 100644 --- a/beacon_node/beacon_chain/src/snapshot_cache.rs +++ b/beacon_node/beacon_chain/src/snapshot_cache.rs @@ -13,7 +13,10 @@ pub const DEFAULT_SNAPSHOT_CACHE_SIZE: usize = 4; /// The minimum block delay to clone the state in the cache instead of removing it. /// This helps keep block processing fast during re-orgs from late blocks. -const MINIMUM_BLOCK_DELAY_FOR_CLONE: Duration = Duration::from_secs(6); +fn minimum_block_delay_for_clone(seconds_per_slot: u64) -> Duration { + // If the block arrived at the attestation deadline or later, it might get re-orged. + Duration::from_secs(seconds_per_slot) / 3 +} /// This snapshot is to be used for verifying a child of `self.beacon_block`. 
#[derive(Debug)] @@ -256,7 +259,7 @@ impl SnapshotCache { if let Some(cache) = self.snapshots.get(i) { // Avoid cloning the block during sync (when the `block_delay` is `None`). if let Some(delay) = block_delay { - if delay >= MINIMUM_BLOCK_DELAY_FOR_CLONE + if delay >= minimum_block_delay_for_clone(spec.seconds_per_slot) && delay <= Duration::from_secs(spec.seconds_per_slot) * 4 || block_slot > cache.beacon_block.slot() + 1 { diff --git a/beacon_node/beacon_chain/src/state_advance_timer.rs b/beacon_node/beacon_chain/src/state_advance_timer.rs index 72fc973e546..f73223fa540 100644 --- a/beacon_node/beacon_chain/src/state_advance_timer.rs +++ b/beacon_node/beacon_chain/src/state_advance_timer.rs @@ -16,6 +16,7 @@ use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS; use crate::{ beacon_chain::{ATTESTATION_CACHE_LOCK_TIMEOUT, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT}, + chain_config::FORK_CHOICE_LOOKAHEAD_FACTOR, snapshot_cache::StateAdvance, BeaconChain, BeaconChainError, BeaconChainTypes, }; @@ -133,7 +134,7 @@ async fn state_advance_timer( // Run fork choice 23/24s of the way through the slot (11.5s on mainnet). // We need to run after the state advance, so use the same condition as above. - let fork_choice_offset = slot_duration / 24; + let fork_choice_offset = slot_duration / FORK_CHOICE_LOOKAHEAD_FACTOR; let fork_choice_instant = if duration_to_next_slot > state_advance_offset { Instant::now() + duration_to_next_slot - fork_choice_offset } else { @@ -224,8 +225,20 @@ async fn state_advance_timer( return; } + // Re-compute the head, dequeuing attestations for the current slot early. beacon_chain.recompute_head_at_slot(next_slot).await; + // Prepare proposers so that the node can send payload attributes in the case where + // it decides to abandon a proposer boost re-org. 
+ if let Err(e) = beacon_chain.prepare_beacon_proposer(current_slot).await { + warn!( + log, + "Unable to prepare proposer with lookahead"; + "error" => ?e, + "slot" => next_slot, + ); + } + // Use a blocking task to avoid blocking the core executor whilst waiting for locks // in `ForkChoiceSignalTx`. beacon_chain.task_executor.clone().spawn_blocking( diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index b88966b41a9..d6e8787f4e0 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -32,7 +32,7 @@ use rand::SeedableRng; use rayon::prelude::*; use sensitive_url::SensitiveUrl; use slog::Logger; -use slot_clock::TestingSlotClock; +use slot_clock::{SlotClock, TestingSlotClock}; use state_processing::per_block_processing::compute_timestamp_at_slot; use state_processing::{ state_advance::{complete_state_advance, partial_state_advance}, @@ -319,6 +319,12 @@ where self } + pub fn logger(mut self, log: Logger) -> Self { + self.log = log.clone(); + self.runtime.set_logger(log); + self + } + /// This mutator will be run before the `store_mutator`. pub fn initial_mutator(mut self, mutator: BoxedMutator) -> Self { assert!( @@ -524,10 +530,9 @@ pub struct BeaconChainHarness { pub rng: Mutex, } -pub type HarnessAttestations = Vec<( - Vec<(Attestation, SubnetId)>, - Option>, -)>; +pub type CommitteeAttestations = Vec<(Attestation, SubnetId)>; +pub type HarnessAttestations = + Vec<(CommitteeAttestations, Option>)>; pub type HarnessSyncContributions = Vec<( Vec<(SyncCommitteeMessage, usize)>, @@ -778,6 +783,21 @@ where sk.sign(message) } + /// Sign a beacon block using the proposer's key. 
+ pub fn sign_beacon_block( + &self, + block: BeaconBlock, + state: &BeaconState, + ) -> SignedBeaconBlock { + let proposer_index = block.proposer_index() as usize; + block.sign( + &self.validator_keypairs[proposer_index].sk, + &state.fork(), + state.genesis_validators_root(), + &self.spec, + ) + } + /// Produces an "unaggregated" attestation for the given `slot` and `index` that attests to /// `beacon_block_root`. The provided `state` should match the `block.state_root` for the /// `block` identified by `beacon_block_root`. @@ -851,13 +871,35 @@ where state_root: Hash256, head_block_root: SignedBeaconBlockHash, attestation_slot: Slot, - ) -> Vec, SubnetId)>> { + ) -> Vec> { + self.make_unaggregated_attestations_with_limit( + attesting_validators, + state, + state_root, + head_block_root, + attestation_slot, + None, + ) + .0 + } + + pub fn make_unaggregated_attestations_with_limit( + &self, + attesting_validators: &[usize], + state: &BeaconState, + state_root: Hash256, + head_block_root: SignedBeaconBlockHash, + attestation_slot: Slot, + limit: Option, + ) -> (Vec>, Vec) { let committee_count = state.get_committee_count_at_slot(state.slot()).unwrap(); let fork = self .spec .fork_at_epoch(attestation_slot.epoch(E::slots_per_epoch())); - state + let attesters = Mutex::new(vec![]); + + let attestations = state .get_beacon_committees_at_slot(attestation_slot) .expect("should get committees") .iter() @@ -869,6 +911,15 @@ where if !attesting_validators.contains(validator_index) { return None; } + + let mut attesters = attesters.lock(); + if let Some(limit) = limit { + if attesters.len() >= limit { + return None; + } + } + attesters.push(*validator_index); + let mut attestation = self .produce_unaggregated_attestation_for_block( attestation_slot, @@ -909,9 +960,19 @@ where Some((attestation, subnet_id)) }) - .collect() + .collect::>() }) - .collect() + .collect::>(); + + let attesters = attesters.into_inner(); + if let Some(limit) = limit { + assert_eq!( + limit, + 
attesters.len(), + "failed to generate `limit` attestations" + ); + } + (attestations, attesters) } /// A list of sync messages for the given state. @@ -1004,13 +1065,38 @@ where block_hash: SignedBeaconBlockHash, slot: Slot, ) -> HarnessAttestations { - let unaggregated_attestations = self.make_unaggregated_attestations( + self.make_attestations_with_limit( attesting_validators, state, state_root, block_hash, slot, - ); + None, + ) + .0 + } + + /// Produce exactly `limit` attestations. + /// + /// Return attestations and vec of validator indices that attested. + pub fn make_attestations_with_limit( + &self, + attesting_validators: &[usize], + state: &BeaconState, + state_root: Hash256, + block_hash: SignedBeaconBlockHash, + slot: Slot, + limit: Option, + ) -> (HarnessAttestations, Vec) { + let (unaggregated_attestations, attesters) = self + .make_unaggregated_attestations_with_limit( + attesting_validators, + state, + state_root, + block_hash, + slot, + limit, + ); let fork = self.spec.fork_at_epoch(slot.epoch(E::slots_per_epoch())); let aggregated_attestations: Vec>> = @@ -1029,7 +1115,7 @@ where .committee .iter() .find(|&validator_index| { - if !attesting_validators.contains(validator_index) { + if !attesters.contains(validator_index) { return false; } @@ -1080,10 +1166,13 @@ where }) .collect(); - unaggregated_attestations - .into_iter() - .zip(aggregated_attestations) - .collect() + ( + unaggregated_attestations + .into_iter() + .zip(aggregated_attestations) + .collect(), + attesters, + ) } pub fn make_sync_contributions( @@ -1736,6 +1825,12 @@ where self.chain.slot_clock.advance_slot(); } + /// Advance the clock to `lookahead` before the start of `slot`. 
+ pub fn advance_to_slot_lookahead(&self, slot: Slot, lookahead: Duration) { + let time = self.chain.slot_clock.start_of(slot).unwrap() - lookahead; + self.chain.slot_clock.set_current_time(time); + } + /// Deprecated: Use make_block() instead /// /// Returns a newly created block, signed by the proposer for the given slot. diff --git a/beacon_node/beacon_chain/tests/merge.rs b/beacon_node/beacon_chain/tests/merge.rs index 19e8902a3e8..c8c47c99041 100644 --- a/beacon_node/beacon_chain/tests/merge.rs +++ b/beacon_node/beacon_chain/tests/merge.rs @@ -53,6 +53,7 @@ async fn merge_with_terminal_block_hash_override() { let harness = BeaconChainHarness::builder(E::default()) .spec(spec) + .logger(logging::test_logger()) .deterministic_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() .mock_execution_layer() @@ -109,6 +110,7 @@ async fn base_altair_merge_with_terminal_block_after_fork() { let harness = BeaconChainHarness::builder(E::default()) .spec(spec) + .logger(logging::test_logger()) .deterministic_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() .mock_execution_layer() diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index d77cc19678d..0b9eaaee0f0 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -7,8 +7,9 @@ use beacon_chain::otb_verification_service::{ use beacon_chain::{ canonical_head::{CachedHead, CanonicalHead}, test_utils::{BeaconChainHarness, EphemeralHarnessType}, - BeaconChainError, BlockError, ExecutionPayloadError, NotifyExecutionLayer, StateSkipConfig, - WhenSlotSkipped, INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, + BeaconChainError, BlockError, ExecutionPayloadError, NotifyExecutionLayer, + OverrideForkchoiceUpdate, StateSkipConfig, WhenSlotSkipped, + INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, }; use execution_layer::{ @@ 
-19,6 +20,7 @@ use execution_layer::{ use fork_choice::{ CountUnrealized, Error as ForkChoiceError, InvalidationOperation, PayloadVerificationStatus, }; +use logging::test_logger; use proto_array::{Error as ProtoArrayError, ExecutionStatus}; use slot_clock::SlotClock; use std::collections::HashMap; @@ -59,6 +61,7 @@ impl InvalidPayloadRig { let harness = BeaconChainHarness::builder(MainnetEthSpec) .spec(spec) + .logger(test_logger()) .deterministic_keypairs(VALIDATOR_COUNT) .mock_execution_layer() .fresh_ephemeral_store() @@ -383,7 +386,7 @@ impl InvalidPayloadRig { .fork_choice_write_lock() .get_head(self.harness.chain.slot().unwrap(), &self.harness.chain.spec) { - Err(ForkChoiceError::ProtoArrayError(e)) if e.contains(s) => (), + Err(ForkChoiceError::ProtoArrayStringError(e)) if e.contains(s) => (), other => panic!("expected {} error, got {:?}", s, other), }; } @@ -978,6 +981,10 @@ async fn payload_preparation() { ) .await; + rig.harness.advance_to_slot_lookahead( + next_slot, + rig.harness.chain.config.prepare_payload_lookahead, + ); rig.harness .chain .prepare_beacon_proposer(rig.harness.chain.slot().unwrap()) @@ -1054,7 +1061,7 @@ async fn invalid_parent() { &rig.harness.chain.spec, CountUnrealized::True, ), - Err(ForkChoiceError::ProtoArrayError(message)) + Err(ForkChoiceError::ProtoArrayStringError(message)) if message.contains(&format!( "{:?}", ProtoArrayError::ParentExecutionStatusIsInvalid { @@ -1121,7 +1128,11 @@ async fn payload_preparation_before_transition_block() { .get_forkchoice_update_parameters(); rig.harness .chain - .update_execution_engine_forkchoice(current_slot, forkchoice_update_params) + .update_execution_engine_forkchoice( + current_slot, + forkchoice_update_params, + OverrideForkchoiceUpdate::Yes, + ) .await .unwrap(); diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 75b865407e2..f3e937b2e5f 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -769,7 +769,11 @@ 
where runtime_context.executor.spawn( async move { let result = inner_chain - .update_execution_engine_forkchoice(current_slot, params) + .update_execution_engine_forkchoice( + current_slot, + params, + Default::default(), + ) .await; // No need to exit early if setting the head fails. It will be set again if/when the diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 2b0c3a4c983..560569c92f2 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -29,7 +29,7 @@ pub struct JsonResponseBody { pub id: serde_json::Value, } -#[derive(Debug, PartialEq, Serialize, Deserialize)] +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(transparent)] pub struct TransparentJsonPayloadId(#[serde(with = "eth2_serde_utils::bytes_8_hex")] pub PayloadId); @@ -227,7 +227,7 @@ impl From> for ExecutionPayload { } } -#[derive(Debug, PartialEq, Serialize, Deserialize)] +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct JsonPayloadAttributesV1 { #[serde(with = "eth2_serde_utils::u64_hex_be")] @@ -270,7 +270,7 @@ impl From for PayloadAttributes { } } -#[derive(Debug, PartialEq, Serialize, Deserialize)] +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct JsonForkChoiceStateV1 { pub head_block_hash: ExecutionBlockHash, @@ -323,7 +323,7 @@ pub enum JsonPayloadStatusV1Status { InvalidBlockHash, } -#[derive(Debug, PartialEq, Serialize, Deserialize)] +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct JsonPayloadStatusV1 { pub status: JsonPayloadStatusV1Status, @@ -388,7 +388,7 @@ impl From for PayloadStatusV1 { } } -#[derive(Debug, PartialEq, Serialize, Deserialize)] +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] 
#[serde(rename_all = "camelCase")] pub struct JsonForkchoiceUpdatedV1Response { pub payload_status: JsonPayloadStatusV1, diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index dfce9745774..ec1415f80bb 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -514,6 +514,16 @@ impl ExecutionLayer { .contains_key(&proposer_index) } + /// Check if a proposer is registered as a local validator, *from a synchronous context*. + /// + /// This method MUST NOT be called from an async task. + pub fn has_proposer_preparation_data_blocking(&self, proposer_index: u64) -> bool { + self.inner + .proposer_preparation_data + .blocking_lock() + .contains_key(&proposer_index) + } + /// Returns the fee-recipient address that should be used to build a block pub async fn get_suggested_fee_recipient(&self, proposer_index: u64) -> Address { if let Some(preparation_data_entry) = @@ -1141,12 +1151,14 @@ impl ExecutionLayer { &[metrics::FORKCHOICE_UPDATED], ); - trace!( + debug!( self.log(), "Issuing engine_forkchoiceUpdated"; "finalized_block_hash" => ?finalized_block_hash, "justified_block_hash" => ?justified_block_hash, "head_block_hash" => ?head_block_hash, + "head_block_root" => ?head_block_root, + "current_slot" => current_slot, ); let next_slot = current_slot + 1; @@ -1762,6 +1774,20 @@ mod test { .await; } + #[tokio::test] + async fn test_forked_terminal_block() { + let runtime = TestRuntime::default(); + let (mock, block_hash) = MockExecutionLayer::default_params(runtime.task_executor.clone()) + .move_to_terminal_block() + .produce_forked_pow_block(); + assert!(mock + .el + .is_valid_terminal_pow_block_hash(block_hash, &mock.spec) + .await + .unwrap() + .unwrap()); + } + #[tokio::test] async fn finds_valid_terminal_block_hash() { let runtime = TestRuntime::default(); diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs 
b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 3620a02dfbb..22dcb400708 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -105,13 +105,15 @@ pub struct PoWBlock { pub timestamp: u64, } -#[derive(Clone)] +#[derive(Debug, Clone)] pub struct ExecutionBlockGenerator { /* * Common database */ + head_block: Option>, + finalized_block_hash: Option, blocks: HashMap>, - block_hashes: HashMap, + block_hashes: HashMap>, /* * PoW block parameters */ @@ -133,6 +135,8 @@ impl ExecutionBlockGenerator { terminal_block_hash: ExecutionBlockHash, ) -> Self { let mut gen = Self { + head_block: <_>::default(), + finalized_block_hash: <_>::default(), blocks: <_>::default(), block_hashes: <_>::default(), terminal_total_difficulty, @@ -149,13 +153,7 @@ impl ExecutionBlockGenerator { } pub fn latest_block(&self) -> Option> { - let hash = *self - .block_hashes - .iter() - .max_by_key(|(number, _)| *number) - .map(|(_, hash)| hash)?; - - self.block_by_hash(hash) + self.head_block.clone() } pub fn latest_execution_block(&self) -> Option { @@ -164,8 +162,18 @@ impl ExecutionBlockGenerator { } pub fn block_by_number(&self, number: u64) -> Option> { - let hash = *self.block_hashes.get(&number)?; - self.block_by_hash(hash) + // Get the latest canonical head block + let mut latest_block = self.latest_block()?; + loop { + let block_number = latest_block.block_number(); + if block_number < number { + return None; + } + if block_number == number { + return Some(latest_block); + } + latest_block = self.block_by_hash(latest_block.parent_hash())?; + } } pub fn execution_block_by_number(&self, number: u64) -> Option { @@ -226,10 +234,16 @@ impl ExecutionBlockGenerator { } pub fn insert_pow_block(&mut self, block_number: u64) -> Result<(), String> { + if let Some(finalized_block_hash) = self.finalized_block_hash { + return Err(format!( + "terminal block {} 
has been finalized. PoW chain has stopped building", + finalized_block_hash + )); + } let parent_hash = if block_number == 0 { ExecutionBlockHash::zero() - } else if let Some(hash) = self.block_hashes.get(&(block_number - 1)) { - *hash + } else if let Some(block) = self.block_by_number(block_number - 1) { + block.block_hash() } else { return Err(format!( "parent with block number {} not found", @@ -244,49 +258,118 @@ impl ExecutionBlockGenerator { parent_hash, )?; - self.insert_block(Block::PoW(block)) + // Insert block into block tree + self.insert_block(Block::PoW(block))?; + + // Set head + if let Some(head_total_difficulty) = + self.head_block.as_ref().and_then(|b| b.total_difficulty()) + { + if block.total_difficulty >= head_total_difficulty { + self.head_block = Some(Block::PoW(block)); + } + } else { + self.head_block = Some(Block::PoW(block)); + } + Ok(()) } - pub fn insert_block(&mut self, block: Block) -> Result<(), String> { + /// Insert a PoW block given the parent hash. + /// + /// Returns `Ok(hash)` of the inserted block. + /// Returns an error if the `parent_hash` does not exist in the block tree or + /// if the parent block is the terminal block. + pub fn insert_pow_block_by_hash( + &mut self, + parent_hash: ExecutionBlockHash, + unique_id: u64, + ) -> Result { + let parent_block = self.block_by_hash(parent_hash).ok_or_else(|| { + format!( + "Block corresponding to parent hash does not exist: {}", + parent_hash + ) + })?; + + let mut block = generate_pow_block( + self.terminal_total_difficulty, + self.terminal_block_number, + parent_block.block_number() + 1, + parent_hash, + )?; + + // Hack the block hash to make this block distinct from any other block with a different + // `unique_id` (the default is 0). 
+ block.block_hash = ExecutionBlockHash::from_root(Hash256::from_low_u64_be(unique_id)); + block.block_hash = ExecutionBlockHash::from_root(block.tree_hash_root()); + + let hash = self.insert_block(Block::PoW(block))?; + + // Set head + if let Some(head_total_difficulty) = + self.head_block.as_ref().and_then(|b| b.total_difficulty()) + { + if block.total_difficulty >= head_total_difficulty { + self.head_block = Some(Block::PoW(block)); + } + } else { + self.head_block = Some(Block::PoW(block)); + } + Ok(hash) + } + + pub fn insert_block(&mut self, block: Block) -> Result { if self.blocks.contains_key(&block.block_hash()) { return Err(format!("{:?} is already known", block.block_hash())); - } else if self.block_hashes.contains_key(&block.block_number()) { - return Err(format!( - "block {} is already known, forking is not supported", - block.block_number() - )); - } else if block.block_number() != 0 && !self.blocks.contains_key(&block.parent_hash()) { + } else if block.parent_hash() != ExecutionBlockHash::zero() + && !self.blocks.contains_key(&block.parent_hash()) + { return Err(format!("parent block {:?} is unknown", block.parent_hash())); } - self.insert_block_without_checks(block) + Ok(self.insert_block_without_checks(block)) } - pub fn insert_block_without_checks(&mut self, block: Block) -> Result<(), String> { + pub fn insert_block_without_checks(&mut self, block: Block) -> ExecutionBlockHash { + let block_hash = block.block_hash(); self.block_hashes - .insert(block.block_number(), block.block_hash()); - self.blocks.insert(block.block_hash(), block); + .entry(block.block_number()) + .or_insert_with(Vec::new) + .push(block_hash); + self.blocks.insert(block_hash, block); - Ok(()) + block_hash } pub fn modify_last_block(&mut self, block_modifier: impl FnOnce(&mut Block)) { - if let Some((last_block_hash, block_number)) = - self.block_hashes.keys().max().and_then(|block_number| { - self.block_hashes - .get(block_number) - .map(|block| (block, *block_number)) + if 
let Some(last_block_hash) = self + .block_hashes + .iter_mut() + .max_by_key(|(block_number, _)| *block_number) + .and_then(|(_, block_hashes)| { + // Remove block hash, we will re-insert with the new block hash after modifying it. + block_hashes.pop() }) { - let mut block = self.blocks.remove(last_block_hash).unwrap(); + let mut block = self.blocks.remove(&last_block_hash).unwrap(); block_modifier(&mut block); + // Update the block hash after modifying the block match &mut block { Block::PoW(b) => b.block_hash = ExecutionBlockHash::from_root(b.tree_hash_root()), Block::PoS(b) => b.block_hash = ExecutionBlockHash::from_root(b.tree_hash_root()), } - self.block_hashes.insert(block_number, block.block_hash()); - self.blocks.insert(block.block_hash(), block); + + // Update head. + if self + .head_block + .as_ref() + .map_or(true, |head| head.block_hash() == last_block_hash) + { + self.head_block = Some(block.clone()); + } + + self.insert_block_without_checks(block); } } @@ -405,6 +488,17 @@ impl ExecutionBlockGenerator { } }; + self.head_block = Some( + self.blocks + .get(&forkchoice_state.head_block_hash) + .unwrap() + .clone(), + ); + + if forkchoice_state.finalized_block_hash != ExecutionBlockHash::zero() { + self.finalized_block_hash = Some(forkchoice_state.finalized_block_hash); + } + Ok(JsonForkchoiceUpdatedV1Response { payload_status: JsonPayloadStatusV1 { status: JsonPayloadStatusV1Status::Valid, diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index ac677bf331f..97c52357559 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -123,6 +123,14 @@ pub async fn handle_rpc( let forkchoice_state: JsonForkChoiceStateV1 = get_param(params, 0)?; let payload_attributes: Option = get_param(params, 1)?; + if let Some(hook_response) = ctx + .hook + .lock() + .on_forkchoice_updated(forkchoice_state.clone(), 
payload_attributes.clone()) + { + return Ok(serde_json::to_value(hook_response).unwrap()); + } + let head_block_hash = forkchoice_state.head_block_hash; // Canned responses set by block hash take priority. diff --git a/beacon_node/execution_layer/src/test_utils/hook.rs b/beacon_node/execution_layer/src/test_utils/hook.rs new file mode 100644 index 00000000000..a3748103e3e --- /dev/null +++ b/beacon_node/execution_layer/src/test_utils/hook.rs @@ -0,0 +1,27 @@ +use crate::json_structures::*; + +type ForkChoiceUpdatedHook = dyn Fn( + JsonForkChoiceStateV1, + Option, + ) -> Option + + Send + + Sync; + +#[derive(Default)] +pub struct Hook { + forkchoice_updated: Option>, +} + +impl Hook { + pub fn on_forkchoice_updated( + &self, + state: JsonForkChoiceStateV1, + payload_attributes: Option, + ) -> Option { + (self.forkchoice_updated.as_ref()?)(state, payload_attributes) + } + + pub fn set_forkchoice_updated_hook(&mut self, f: Box) { + self.forkchoice_updated = Some(f); + } +} diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index 065abc93609..e9d4b2121be 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -234,6 +234,21 @@ impl MockExecutionLayer { self } + pub fn produce_forked_pow_block(self) -> (Self, ExecutionBlockHash) { + let head_block = self + .server + .execution_block_generator() + .latest_block() + .unwrap(); + + let block_hash = self + .server + .execution_block_generator() + .insert_pow_block_by_hash(head_block.parent_hash(), 1) + .unwrap(); + (self, block_hash) + } + pub async fn with_terminal_block<'a, U, V>(self, func: U) -> Self where U: Fn(ChainSpec, ExecutionLayer, Option) -> V, diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index f5066879a78..f18ecbe6226 100644 --- 
a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -23,6 +23,7 @@ use types::{EthSpec, ExecutionBlockHash, Uint256}; use warp::{http::StatusCode, Filter, Rejection}; pub use execution_block_generator::{generate_pow_block, Block, ExecutionBlockGenerator}; +pub use hook::Hook; pub use mock_builder::{Context as MockBuilderContext, MockBuilder, Operation, TestingBuilder}; pub use mock_execution_layer::MockExecutionLayer; @@ -33,6 +34,7 @@ pub const DEFAULT_BUILDER_THRESHOLD_WEI: u128 = 1_000_000_000_000_000_000; mod execution_block_generator; mod handle_rpc; +mod hook; mod mock_builder; mod mock_execution_layer; @@ -99,6 +101,7 @@ impl MockServer { static_new_payload_response: <_>::default(), static_forkchoice_updated_response: <_>::default(), static_get_block_by_hash_response: <_>::default(), + hook: <_>::default(), new_payload_statuses: <_>::default(), fcu_payload_statuses: <_>::default(), _phantom: PhantomData, @@ -359,8 +362,7 @@ impl MockServer { .write() // The EF tests supply blocks out of order, so we must import them "without checks" and // trust they form valid chains. - .insert_block_without_checks(block) - .unwrap() + .insert_block_without_checks(block); } pub fn get_block(&self, block_hash: ExecutionBlockHash) -> Option> { @@ -441,6 +443,7 @@ pub struct Context { pub static_new_payload_response: Arc>>, pub static_forkchoice_updated_response: Arc>>, pub static_get_block_by_hash_response: Arc>>>, + pub hook: Arc>, // Canned responses by block hash. 
// diff --git a/beacon_node/http_api/tests/common.rs b/beacon_node/http_api/tests/common.rs index ec1448df7b3..9d6ad4050bf 100644 --- a/beacon_node/http_api/tests/common.rs +++ b/beacon_node/http_api/tests/common.rs @@ -1,5 +1,5 @@ use beacon_chain::{ - test_utils::{BeaconChainHarness, EphemeralHarnessType}, + test_utils::{BeaconChainHarness, BoxedMutator, EphemeralHarnessType}, BeaconChain, BeaconChainTypes, }; use directory::DEFAULT_ROOT_DIR; @@ -12,6 +12,7 @@ use lighthouse_network::{ types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield, SyncState}, ConnectedPoint, Enr, NetworkGlobals, PeerId, PeerManager, }; +use logging::test_logger; use network::{NetworkReceivers, NetworkSenders}; use sensitive_url::SensitiveUrl; use slog::Logger; @@ -19,6 +20,7 @@ use std::future::Future; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::sync::Arc; use std::time::Duration; +use store::MemoryStore; use tokio::sync::oneshot; use types::{ChainSpec, EthSpec}; @@ -47,13 +49,30 @@ pub struct ApiServer> { pub external_peer_id: PeerId, } +type Mutator = BoxedMutator, MemoryStore>; + impl InteractiveTester { pub async fn new(spec: Option, validator_count: usize) -> Self { - let harness = BeaconChainHarness::builder(E::default()) + Self::new_with_mutator(spec, validator_count, None).await + } + + pub async fn new_with_mutator( + spec: Option, + validator_count: usize, + mutator: Option>, + ) -> Self { + let mut harness_builder = BeaconChainHarness::builder(E::default()) .spec_or_default(spec) .deterministic_keypairs(validator_count) - .fresh_ephemeral_store() - .build(); + .logger(test_logger()) + .mock_execution_layer() + .fresh_ephemeral_store(); + + if let Some(mutator) = mutator { + harness_builder = harness_builder.initial_mutator(mutator); + } + + let harness = harness_builder.build(); let ApiServer { server, diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index b3227d7723a..17a3624afed 100644 --- 
a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -1,9 +1,22 @@ //! Generic tests that make use of the (newer) `InteractiveApiTester` use crate::common::*; -use beacon_chain::test_utils::{AttestationStrategy, BlockStrategy}; +use beacon_chain::{ + chain_config::ReOrgThreshold, + test_utils::{AttestationStrategy, BlockStrategy}, +}; use eth2::types::DepositContractData; +use execution_layer::{ForkChoiceState, PayloadAttributes}; +use parking_lot::Mutex; +use slot_clock::SlotClock; +use state_processing::state_advance::complete_state_advance; +use std::collections::HashMap; +use std::sync::Arc; +use std::time::Duration; use tree_hash::TreeHash; -use types::{EthSpec, FullPayload, MainnetEthSpec, Slot}; +use types::{ + Address, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, ForkName, FullPayload, + MainnetEthSpec, ProposerPreparationData, Slot, +}; type E = MainnetEthSpec; @@ -33,6 +46,495 @@ async fn deposit_contract_custom_network() { assert_eq!(result, expected); } +/// Data structure for tracking fork choice updates received by the mock execution layer. +#[derive(Debug, Default)] +struct ForkChoiceUpdates { + updates: HashMap>, +} + +#[derive(Debug, Clone)] +struct ForkChoiceUpdateMetadata { + received_at: Duration, + state: ForkChoiceState, + payload_attributes: Option, +} + +impl ForkChoiceUpdates { + fn insert(&mut self, update: ForkChoiceUpdateMetadata) { + self.updates + .entry(update.state.head_block_hash) + .or_insert_with(Vec::new) + .push(update); + } + + fn contains_update_for(&self, block_hash: ExecutionBlockHash) -> bool { + self.updates.contains_key(&block_hash) + } + + /// Find the first fork choice update for `head_block_hash` with payload attributes for a + /// block proposal at `proposal_timestamp`. + fn first_update_with_payload_attributes( + &self, + head_block_hash: ExecutionBlockHash, + proposal_timestamp: u64, + ) -> Option { + self.updates + .get(&head_block_hash)? 
+ .iter() + .find(|update| { + update + .payload_attributes + .as_ref() + .map_or(false, |payload_attributes| { + payload_attributes.timestamp == proposal_timestamp + }) + }) + .cloned() + } +} + +pub struct ReOrgTest { + head_slot: Slot, + /// Number of slots between parent block and canonical head. + parent_distance: u64, + /// Number of slots between head block and block proposal slot. + head_distance: u64, + re_org_threshold: u64, + max_epochs_since_finalization: u64, + percent_parent_votes: usize, + percent_empty_votes: usize, + percent_head_votes: usize, + should_re_org: bool, + misprediction: bool, +} + +impl Default for ReOrgTest { + /// Default config represents a regular easy re-org. + fn default() -> Self { + Self { + head_slot: Slot::new(30), + parent_distance: 1, + head_distance: 1, + re_org_threshold: 20, + max_epochs_since_finalization: 2, + percent_parent_votes: 100, + percent_empty_votes: 100, + percent_head_votes: 0, + should_re_org: true, + misprediction: false, + } + } +} + +// Test that the beacon node will try to perform proposer boost re-orgs on late blocks when +// configured. 
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn proposer_boost_re_org_zero_weight() { + proposer_boost_re_org_test(ReOrgTest::default()).await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn proposer_boost_re_org_epoch_boundary() { + proposer_boost_re_org_test(ReOrgTest { + head_slot: Slot::new(31), + should_re_org: false, + ..Default::default() + }) + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn proposer_boost_re_org_slot_after_epoch_boundary() { + proposer_boost_re_org_test(ReOrgTest { + head_slot: Slot::new(33), + ..Default::default() + }) + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn proposer_boost_re_org_bad_ffg() { + proposer_boost_re_org_test(ReOrgTest { + head_slot: Slot::new(64 + 22), + should_re_org: false, + ..Default::default() + }) + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn proposer_boost_re_org_no_finality() { + proposer_boost_re_org_test(ReOrgTest { + head_slot: Slot::new(96), + percent_parent_votes: 100, + percent_empty_votes: 0, + percent_head_votes: 100, + should_re_org: false, + ..Default::default() + }) + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn proposer_boost_re_org_finality() { + proposer_boost_re_org_test(ReOrgTest { + head_slot: Slot::new(129), + ..Default::default() + }) + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn proposer_boost_re_org_parent_distance() { + proposer_boost_re_org_test(ReOrgTest { + head_slot: Slot::new(30), + parent_distance: 2, + should_re_org: false, + ..Default::default() + }) + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn proposer_boost_re_org_head_distance() { + proposer_boost_re_org_test(ReOrgTest { + head_slot: Slot::new(29), + head_distance: 2, + should_re_org: false, + ..Default::default() + 
}) + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn proposer_boost_re_org_very_unhealthy() { + proposer_boost_re_org_test(ReOrgTest { + head_slot: Slot::new(31), + parent_distance: 2, + head_distance: 2, + percent_parent_votes: 10, + percent_empty_votes: 10, + percent_head_votes: 10, + should_re_org: false, + ..Default::default() + }) + .await; +} + +/// The head block is late but still receives 30% of the committee vote, leading to a misprediction. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn proposer_boost_re_org_weight_misprediction() { + proposer_boost_re_org_test(ReOrgTest { + head_slot: Slot::new(30), + percent_empty_votes: 70, + percent_head_votes: 30, + should_re_org: false, + misprediction: true, + ..Default::default() + }) + .await; +} + +/// Run a proposer boost re-org test. +/// +/// - `head_slot`: the slot of the canonical head to be reorged +/// - `reorg_threshold`: committee percentage value for reorging +/// - `num_empty_votes`: percentage of comm of attestations for the parent block +/// - `num_head_votes`: number of attestations for the head block +/// - `should_re_org`: whether the proposer should build on the parent rather than the head +pub async fn proposer_boost_re_org_test( + ReOrgTest { + head_slot, + parent_distance, + head_distance, + re_org_threshold, + max_epochs_since_finalization, + percent_parent_votes, + percent_empty_votes, + percent_head_votes, + should_re_org, + misprediction, + }: ReOrgTest, +) { + assert!(head_slot > 0); + + // We require a network with execution enabled so we can check EL message timings. + let mut spec = ForkName::Merge.make_genesis_spec(E::default_spec()); + spec.terminal_total_difficulty = 1.into(); + + // Ensure there are enough validators to have `attesters_per_slot`. 
+ let attesters_per_slot = 10; + let validator_count = E::slots_per_epoch() as usize * attesters_per_slot; + let all_validators = (0..validator_count).collect::>(); + let num_initial = head_slot.as_u64().checked_sub(parent_distance + 1).unwrap(); + + // Check that the required vote percentages can be satisfied exactly using `attesters_per_slot`. + assert_eq!(100 % attesters_per_slot, 0); + let percent_per_attester = 100 / attesters_per_slot; + assert_eq!(percent_parent_votes % percent_per_attester, 0); + assert_eq!(percent_empty_votes % percent_per_attester, 0); + assert_eq!(percent_head_votes % percent_per_attester, 0); + let num_parent_votes = Some(attesters_per_slot * percent_parent_votes / 100); + let num_empty_votes = Some(attesters_per_slot * percent_empty_votes / 100); + let num_head_votes = Some(attesters_per_slot * percent_head_votes / 100); + + let tester = InteractiveTester::::new_with_mutator( + Some(spec), + validator_count, + Some(Box::new(move |builder| { + builder + .proposer_re_org_threshold(Some(ReOrgThreshold(re_org_threshold))) + .proposer_re_org_max_epochs_since_finalization(Epoch::new( + max_epochs_since_finalization, + )) + })), + ) + .await; + let harness = &tester.harness; + let mock_el = harness.mock_execution_layer.as_ref().unwrap(); + let execution_ctx = mock_el.server.ctx.clone(); + let slot_clock = &harness.chain.slot_clock; + + // Move to terminal block. + mock_el.server.all_payloads_valid(); + execution_ctx + .execution_block_generator + .write() + .move_to_terminal_block() + .unwrap(); + + // Send proposer preparation data for all validators. 
+ let proposer_preparation_data = all_validators + .iter() + .map(|i| ProposerPreparationData { + validator_index: *i as u64, + fee_recipient: Address::from_low_u64_be(*i as u64), + }) + .collect::>(); + harness + .chain + .execution_layer + .as_ref() + .unwrap() + .update_proposer_preparation( + head_slot.epoch(E::slots_per_epoch()) + 1, + &proposer_preparation_data, + ) + .await; + + // Create some chain depth. + harness.advance_slot(); + harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + // Start collecting fork choice updates. + let forkchoice_updates = Arc::new(Mutex::new(ForkChoiceUpdates::default())); + let forkchoice_updates_inner = forkchoice_updates.clone(); + let chain_inner = harness.chain.clone(); + + execution_ctx + .hook + .lock() + .set_forkchoice_updated_hook(Box::new(move |state, payload_attributes| { + let received_at = chain_inner.slot_clock.now_duration().unwrap(); + let state = ForkChoiceState::from(state); + let payload_attributes = payload_attributes.map(Into::into); + let update = ForkChoiceUpdateMetadata { + received_at, + state, + payload_attributes, + }; + forkchoice_updates_inner.lock().insert(update); + None + })); + + // We set up the following block graph, where B is a block that arrives late and is re-orged + // by C. + // + // A | B | - | + // ^ | - | C | + + let slot_a = Slot::new(num_initial + 1); + let slot_b = slot_a + parent_distance; + let slot_c = slot_b + head_distance; + + harness.advance_slot(); + let (block_a_root, block_a, state_a) = harness + .add_block_at_slot(slot_a, harness.get_current_state()) + .await + .unwrap(); + + // Attest to block A during slot A. + let (block_a_parent_votes, _) = harness.make_attestations_with_limit( + &all_validators, + &state_a, + state_a.canonical_root(), + block_a_root, + slot_a, + num_parent_votes, + ); + harness.process_attestations(block_a_parent_votes); + + // Attest to block A during slot B. 
+ for _ in 0..parent_distance { + harness.advance_slot(); + } + let (block_a_empty_votes, block_a_attesters) = harness.make_attestations_with_limit( + &all_validators, + &state_a, + state_a.canonical_root(), + block_a_root, + slot_b, + num_empty_votes, + ); + harness.process_attestations(block_a_empty_votes); + + let remaining_attesters = all_validators + .iter() + .copied() + .filter(|index| !block_a_attesters.contains(index)) + .collect::>(); + + // Produce block B and process it halfway through the slot. + let (block_b, mut state_b) = harness.make_block(state_a.clone(), slot_b).await; + let block_b_root = block_b.canonical_root(); + + let obs_time = slot_clock.start_of(slot_b).unwrap() + slot_clock.slot_duration() / 2; + slot_clock.set_current_time(obs_time); + harness.chain.block_times_cache.write().set_time_observed( + block_b_root, + slot_b, + obs_time, + None, + None, + ); + harness.process_block_result(block_b.clone()).await.unwrap(); + + // Add attestations to block B. + let (block_b_head_votes, _) = harness.make_attestations_with_limit( + &remaining_attesters, + &state_b, + state_b.canonical_root(), + block_b_root.into(), + slot_b, + num_head_votes, + ); + harness.process_attestations(block_b_head_votes); + + let payload_lookahead = harness.chain.config.prepare_payload_lookahead; + let fork_choice_lookahead = Duration::from_millis(500); + while harness.get_current_slot() != slot_c { + let current_slot = harness.get_current_slot(); + let next_slot = current_slot + 1; + + // Simulate the scheduled call to prepare proposers at 8 seconds into the slot. + harness.advance_to_slot_lookahead(next_slot, payload_lookahead); + harness + .chain + .prepare_beacon_proposer(current_slot) + .await + .unwrap(); + + // Simulate the scheduled call to fork choice + prepare proposers 500ms before the + // next slot. 
+ harness.advance_to_slot_lookahead(next_slot, fork_choice_lookahead); + harness.chain.recompute_head_at_slot(next_slot).await; + harness + .chain + .prepare_beacon_proposer(current_slot) + .await + .unwrap(); + + harness.advance_slot(); + harness.chain.per_slot_task().await; + } + + // Produce block C. + // Advance state_b so we can get the proposer. + complete_state_advance(&mut state_b, None, slot_c, &harness.chain.spec).unwrap(); + + let proposer_index = state_b + .get_beacon_proposer_index(slot_c, &harness.chain.spec) + .unwrap(); + let randao_reveal = harness + .sign_randao_reveal(&state_b, proposer_index, slot_c) + .into(); + let unsigned_block_c = tester + .client + .get_validator_blocks(slot_c, &randao_reveal, None) + .await + .unwrap() + .data; + let block_c = harness.sign_beacon_block(unsigned_block_c, &state_b); + + if should_re_org { + // Block C should build on A. + assert_eq!(block_c.parent_root(), block_a_root.into()); + } else { + // Block C should build on B. + assert_eq!(block_c.parent_root(), block_b_root); + } + + // Applying block C should cause it to become head regardless (re-org or continuation). + let block_root_c = harness + .process_block_result(block_c.clone()) + .await + .unwrap() + .into(); + assert_eq!(harness.head_block_root(), block_root_c); + + // Check the fork choice updates that were sent. + let forkchoice_updates = forkchoice_updates.lock(); + let block_a_exec_hash = block_a.message().execution_payload().unwrap().block_hash(); + let block_b_exec_hash = block_b.message().execution_payload().unwrap().block_hash(); + + let block_c_timestamp = block_c.message().execution_payload().unwrap().timestamp(); + + // If we re-orged then no fork choice update for B should have been sent. + assert_eq!( + should_re_org, + !forkchoice_updates.contains_update_for(block_b_exec_hash), + "{block_b_exec_hash:?}" + ); + + // Check the timing of the first fork choice update with payload attributes for block C. 
+ let c_parent_hash = if should_re_org { + block_a_exec_hash + } else { + block_b_exec_hash + }; + let first_update = forkchoice_updates + .first_update_with_payload_attributes(c_parent_hash, block_c_timestamp) + .unwrap(); + let payload_attribs = first_update.payload_attributes.as_ref().unwrap(); + + let lookahead = slot_clock + .start_of(slot_c) + .unwrap() + .checked_sub(first_update.received_at) + .unwrap(); + + if !misprediction { + assert_eq!( + lookahead, payload_lookahead, + "lookahead={lookahead:?}, timestamp={}, prev_randao={:?}", + payload_attribs.timestamp, payload_attribs.prev_randao, + ); + } else { + // On a misprediction we issue the first fcU 500ms before creating a block! + assert_eq!( + lookahead, fork_choice_lookahead, + "timestamp={}, prev_randao={:?}", + payload_attribs.timestamp, payload_attribs.prev_randao, + ); + } +} + // Test that running fork choice before proposing results in selection of the correct head. #[tokio::test(flavor = "multi_thread", worker_threads = 2)] pub async fn fork_choice_before_proposal() { diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 44a995176d1..c5f4cc8adf4 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -761,6 +761,38 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { experimental as it may obscure performance issues.") .takes_value(false) ) + .arg( + Arg::with_name("disable-proposer-reorgs") + .long("disable-proposer-reorgs") + .help("Do not attempt to reorg late blocks from other validators when proposing.") + .takes_value(false) + ) + .arg( + Arg::with_name("proposer-reorg-threshold") + .long("proposer-reorg-threshold") + .value_name("PERCENT") + .help("Percentage of vote weight below which to attempt a proposer reorg. 
\ + Default: 20%") + .conflicts_with("disable-proposer-reorgs") + ) + .arg( + Arg::with_name("proposer-reorg-epochs-since-finalization") + .long("proposer-reorg-epochs-since-finalization") + .value_name("EPOCHS") + .help("Maximum number of epochs since finalization at which proposer reorgs are \ + allowed. Default: 2") + .conflicts_with("disable-proposer-reorgs") + ) + .arg( + Arg::with_name("prepare-payload-lookahead") + .long("prepare-payload-lookahead") + .value_name("MILLISECONDS") + .help("The time before the start of a proposal slot at which payload attributes \ + should be sent. Low values are useful for execution nodes which don't \ + improve their payload after the first call, and high values are useful \ + for ensuring the EL is given ample notice. Default: 1/3 of a slot.") + .takes_value(true) + ) .arg( Arg::with_name("fork-choice-before-proposal-timeout") .long("fork-choice-before-proposal-timeout") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index e98b585f5f3..b3bfa696edf 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -1,3 +1,7 @@ +use beacon_chain::chain_config::{ + ReOrgThreshold, DEFAULT_PREPARE_PAYLOAD_LOOKAHEAD_FACTOR, + DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, DEFAULT_RE_ORG_THRESHOLD, +}; use clap::ArgMatches; use clap_utils::flags::DISABLE_MALLOC_TUNING_FLAG; use client::{ClientConfig, ClientGenesis}; @@ -18,6 +22,7 @@ use std::net::Ipv6Addr; use std::net::{IpAddr, Ipv4Addr, ToSocketAddrs}; use std::path::{Path, PathBuf}; use std::str::FromStr; +use std::time::Duration; use types::{Checkpoint, Epoch, EthSpec, Hash256, PublicKeyBytes, GRAFFITI_BYTES_LEN}; use unused_port::{unused_tcp_port, unused_udp_port}; @@ -674,11 +679,32 @@ pub fn get_config( client_config.chain.enable_lock_timeouts = false; } + if cli_args.is_present("disable-proposer-reorgs") { + client_config.chain.re_org_threshold = None; + } else { + client_config.chain.re_org_threshold = Some( + 
clap_utils::parse_optional(cli_args, "proposer-reorg-threshold")? + .map(ReOrgThreshold) + .unwrap_or(DEFAULT_RE_ORG_THRESHOLD), + ); + client_config.chain.re_org_max_epochs_since_finalization = + clap_utils::parse_optional(cli_args, "proposer-reorg-epochs-since-finalization")? + .unwrap_or(DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION); + } + // Note: This overrides any previous flags that enable this option. if cli_args.is_present("disable-deposit-contract-sync") { client_config.sync_eth1_chain = false; } + client_config.chain.prepare_payload_lookahead = + clap_utils::parse_optional(cli_args, "prepare-payload-lookahead")? + .map(Duration::from_millis) + .unwrap_or_else(|| { + Duration::from_secs(spec.seconds_per_slot) + / DEFAULT_PREPARE_PAYLOAD_LOOKAHEAD_FACTOR + }); + if let Some(timeout) = clap_utils::parse_optional(cli_args, "fork-choice-before-proposal-timeout")? { diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index a43fa10e649..470407ebee9 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -47,6 +47,7 @@ * [Release Candidates](./advanced-release-candidates.md) * [MEV and Lighthouse](./builders.md) * [Merge Migration](./merge-migration.md) + * [Late Block Re-orgs](./late-block-re-orgs.md) * [Contributing](./contributing.md) * [Development Environment](./setup.md) * [FAQs](./faq.md) diff --git a/book/src/builders.md b/book/src/builders.md index 99fae5b3e76..f2a4b3936a5 100644 --- a/book/src/builders.md +++ b/book/src/builders.md @@ -200,19 +200,23 @@ for `INFO` and `WARN` messages indicating why the builder was not used. Examples of messages indicating fallback to a locally produced block are: ``` -INFO No payload provided by connected builder. +INFO Builder did not return a payload ``` ``` -WARN Unable to retrieve a payload from a connected builder +WARN Builder error when requesting payload ``` ``` -INFO The value offered by the connected builder does not meet the configured profit threshold. 
+WARN Builder returned invalid payload ``` ``` -INFO Due to poor chain health the local execution engine will be used for payload construction. +INFO Builder payload ignored +``` + +``` +INFO Chain is unhealthy, using local payload ``` In case of fallback you should see a log indicating that the locally produced payload was diff --git a/book/src/late-block-re-orgs.md b/book/src/late-block-re-orgs.md new file mode 100644 index 00000000000..0014af8f152 --- /dev/null +++ b/book/src/late-block-re-orgs.md @@ -0,0 +1,60 @@ +# Late Block Re-orgs + +Since v3.4.0 Lighthouse will opportunistically re-org late blocks when proposing. + +This feature is intended to disincentivise late blocks and improve network health. Proposing a +re-orging block is also more profitable for the proposer because it increases the number of +attestations and transactions that can be included. + +## Command line flags + +There are three flags which control the re-orging behaviour: + +* `--disable-proposer-reorgs`: turn re-orging off (it's on by default). +* `--proposer-reorg-threshold N`: attempt to orphan blocks with less than N% of the committee vote. If this parameter isn't set then N defaults to 20% when the feature is enabled. +* `--proposer-reorg-epochs-since-finalization N`: only attempt to re-org late blocks when the number of epochs since finalization is less than or equal to N. The default is 2 epochs, + meaning re-orgs will only be attempted when the chain is finalizing optimally. + +All flags should be applied to `lighthouse bn`. The default configuration is recommended as it +balances the chance of the re-org succeeding against the chance of failure due to attestations +arriving late and making the re-org block non-viable. + +## Safeguards + +To prevent excessive re-orgs there are several safeguards in place that limit when a re-org +will be attempted. 
+ +The full conditions are described in [the spec][] but the most important ones are: + +* Only single-slot re-orgs: Lighthouse will build a block at N + 1 to re-org N by building on the + parent N - 1. The result is a chain with exactly one skipped slot. +* No epoch boundaries: to ensure that the selected proposer does not change, Lighthouse will + not propose a re-orging block in the 0th slot of an epoch. + +## Logs + +You can track the reasons for re-orgs being attempted (or not) via Lighthouse's logs. + +A pair of messages at `INFO` level will be logged if a re-org opportunity is detected: + +> INFO Attempting re-org due to weak head threshold_weight: 45455983852725, head_weight: 0, parent: 0x09d953b69041f280758400c671130d174113bbf57c2d26553a77fb514cad4890, weak_head: 0xf64f8e5ed617dc18c1e759dab5d008369767c3678416dac2fe1d389562842b49 + +> INFO Proposing block to re-org current head head_to_reorg: 0xf64f…2b49, slot: 1105320 + +This should be followed shortly after by a `WARN` log indicating that a re-org occurred. 
This is +expected and normal: + +> WARN Beacon chain re-org reorg_distance: 1, new_slot: 1105320, new_head: 0x72791549e4ca792f91053bc7cf1e55c6fbe745f78ce7a16fc3acb6f09161becd, previous_slot: 1105319, previous_head: 0xf64f8e5ed617dc18c1e759dab5d008369767c3678416dac2fe1d389562842b49 + +In case a re-org is not viable (which should be most of the time), Lighthouse will just propose a +block as normal and log the reason the re-org was not attempted at debug level: + +> DEBG Not attempting re-org reason: head not late + +If you are interested in digging into the timing of `forkchoiceUpdated` messages sent to the +execution layer, there is also a debug log for the suppression of `forkchoiceUpdated` messages +when Lighthouse thinks that a re-org is likely: + +> DEBG Fork choice update overridden slot: 1105320, override: 0x09d953b69041f280758400c671130d174113bbf57c2d26553a77fb514cad4890, canonical_head: 0xf64f8e5ed617dc18c1e759dab5d008369767c3678416dac2fe1d389562842b49 + +[the spec]: https://github.com/ethereum/consensus-specs/pull/3034 diff --git a/common/task_executor/src/test_utils.rs b/common/task_executor/src/test_utils.rs index 7d59cdf022c..c6e5ad01e68 100644 --- a/common/task_executor/src/test_utils.rs +++ b/common/task_executor/src/test_utils.rs @@ -60,6 +60,13 @@ impl Drop for TestRuntime { } } +impl TestRuntime { + pub fn set_logger(&mut self, log: Logger) { + self.log = log.clone(); + self.task_executor.log = log; + } +} + pub fn null_logger() -> Result { let log_builder = NullLoggerBuilder; log_builder diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 7f12e1d8973..290cef78ab5 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -1,6 +1,7 @@ use crate::{ForkChoiceStore, InvalidationOperation}; use proto_array::{ - Block as ProtoBlock, CountUnrealizedFull, ExecutionStatus, ProtoArrayForkChoice, + Block as ProtoBlock, CountUnrealizedFull, ExecutionStatus, 
ProposerHeadError, ProposerHeadInfo, + ProtoArrayForkChoice, ReOrgThreshold, }; use slog::{crit, debug, warn, Logger}; use ssz_derive::{Decode, Encode}; @@ -22,7 +23,8 @@ pub enum Error { InvalidAttestation(InvalidAttestation), InvalidAttesterSlashing(AttesterSlashingValidationError), InvalidBlock(InvalidBlock), - ProtoArrayError(String), + ProtoArrayStringError(String), + ProtoArrayError(proto_array::Error), InvalidProtoArrayBytes(String), InvalidLegacyProtoArrayBytes(String), FailedToProcessInvalidExecutionPayload(String), @@ -44,6 +46,7 @@ pub enum Error { ForkChoiceStoreError(T), UnableToSetJustifiedCheckpoint(T), AfterBlockFailed(T), + ProposerHeadError(T), InvalidAnchor { block_slot: Slot, state_slot: Slot, @@ -59,6 +62,13 @@ pub enum Error { MissingFinalizedBlock { finalized_checkpoint: Checkpoint, }, + WrongSlotForGetProposerHead { + current_slot: Slot, + fc_store_slot: Slot, + }, + ProposerBoostNotExpiredForGetProposerHead { + proposer_boost_root: Hash256, + }, UnrealizedVoteProcessing(state_processing::EpochProcessingError), ParticipationCacheBuild(BeaconStateError), ValidatorStatuses(BeaconStateError), @@ -153,6 +163,12 @@ pub enum InvalidAttestation { impl From for Error { fn from(e: String) -> Self { + Error::ProtoArrayStringError(e) + } +} + +impl From for Error { + fn from(e: proto_array::Error) -> Self { Error::ProtoArrayError(e) } } @@ -554,6 +570,69 @@ where Ok(head_root) } + /// Get the block to build on as proposer, taking into account proposer re-orgs. + /// + /// You *must* call `get_head` for the proposal slot prior to calling this function and pass + /// in the result of `get_head` as `canonical_head`. + pub fn get_proposer_head( + &self, + current_slot: Slot, + canonical_head: Hash256, + re_org_threshold: ReOrgThreshold, + max_epochs_since_finalization: Epoch, + ) -> Result>> { + // Ensure that fork choice has already been updated for the current slot. 
This prevents + // us from having to take a write lock or do any dequeueing of attestations in this + // function. + let fc_store_slot = self.fc_store.get_current_slot(); + if current_slot != fc_store_slot { + return Err(ProposerHeadError::Error( + Error::WrongSlotForGetProposerHead { + current_slot, + fc_store_slot, + }, + )); + } + + // Similarly, the proposer boost for the previous head should already have expired. + let proposer_boost_root = self.fc_store.proposer_boost_root(); + if !proposer_boost_root.is_zero() { + return Err(ProposerHeadError::Error( + Error::ProposerBoostNotExpiredForGetProposerHead { + proposer_boost_root, + }, + )); + } + + self.proto_array + .get_proposer_head::( + current_slot, + canonical_head, + self.fc_store.justified_balances(), + re_org_threshold, + max_epochs_since_finalization, + ) + .map_err(ProposerHeadError::convert_inner_error) + } + + pub fn get_preliminary_proposer_head( + &self, + canonical_head: Hash256, + re_org_threshold: ReOrgThreshold, + max_epochs_since_finalization: Epoch, + ) -> Result>> { + let current_slot = self.fc_store.get_current_slot(); + self.proto_array + .get_proposer_head_info::( + current_slot, + canonical_head, + self.fc_store.justified_balances(), + re_org_threshold, + max_epochs_since_finalization, + ) + .map_err(ProposerHeadError::convert_inner_error) + } + /// Return information about: /// /// - The LMD head of the chain. 
diff --git a/consensus/fork_choice/src/fork_choice_store.rs b/consensus/fork_choice/src/fork_choice_store.rs index 9604e254754..60c58859ed8 100644 --- a/consensus/fork_choice/src/fork_choice_store.rs +++ b/consensus/fork_choice/src/fork_choice_store.rs @@ -1,3 +1,4 @@ +use proto_array::JustifiedBalances; use std::collections::BTreeSet; use std::fmt::Debug; use types::{BeaconBlockRef, BeaconState, Checkpoint, EthSpec, ExecPayload, Hash256, Slot}; @@ -44,7 +45,7 @@ pub trait ForkChoiceStore: Sized { fn justified_checkpoint(&self) -> &Checkpoint; /// Returns balances from the `state` identified by `justified_checkpoint.root`. - fn justified_balances(&self) -> &[u64]; + fn justified_balances(&self) -> &JustifiedBalances; /// Returns the `best_justified_checkpoint`. fn best_justified_checkpoint(&self) -> &Checkpoint; diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index 850f7c4a120..00bd1f763dc 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -378,9 +378,13 @@ impl ForkChoiceTest { assert_eq!( &balances[..], - fc.fc_store().justified_balances(), + &fc.fc_store().justified_balances().effective_balances, "balances should match" - ) + ); + assert_eq!( + balances.iter().sum::(), + fc.fc_store().justified_balances().total_effective_balance + ); } /// Returns an attestation that is valid for some slot in the given `chain`. 
diff --git a/consensus/proto_array/Cargo.toml b/consensus/proto_array/Cargo.toml index ad79ecc1e6b..dfab6fda567 100644 --- a/consensus/proto_array/Cargo.toml +++ b/consensus/proto_array/Cargo.toml @@ -15,3 +15,4 @@ eth2_ssz_derive = "0.3.0" serde = "1.0.116" serde_derive = "1.0.116" serde_yaml = "0.8.13" +safe_arith = { path = "../safe_arith" } diff --git a/consensus/proto_array/src/error.rs b/consensus/proto_array/src/error.rs index 826bf6c3a79..c55739da792 100644 --- a/consensus/proto_array/src/error.rs +++ b/consensus/proto_array/src/error.rs @@ -1,3 +1,4 @@ +use safe_arith::ArithError; use types::{Checkpoint, Epoch, ExecutionBlockHash, Hash256, Slot}; #[derive(Clone, PartialEq, Debug)] @@ -15,6 +16,7 @@ pub enum Error { InvalidNodeDelta(usize), DeltaOverflow(usize), ProposerBoostOverflow(usize), + ReOrgThresholdOverflow, IndexOverflow(&'static str), InvalidExecutionDeltaOverflow(usize), InvalidDeltaLen { @@ -48,6 +50,13 @@ pub enum Error { block_root: Hash256, parent_root: Hash256, }, + Arith(ArithError), +} + +impl From for Error { + fn from(e: ArithError) -> Self { + Error::Arith(e) + } } #[derive(Clone, PartialEq, Debug)] diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index ba6f3170dc1..035fb799eea 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -5,7 +5,7 @@ mod votes; use crate::proto_array::CountUnrealizedFull; use crate::proto_array_fork_choice::{Block, ExecutionStatus, ProtoArrayForkChoice}; -use crate::InvalidationOperation; +use crate::{InvalidationOperation, JustifiedBalances}; use serde_derive::{Deserialize, Serialize}; use std::collections::BTreeSet; use types::{ @@ -101,11 +101,14 @@ impl ForkChoiceTestDefinition { justified_state_balances, expected_head, } => { + let justified_balances = + JustifiedBalances::from_effective_balances(justified_state_balances) + .unwrap(); let head = 
fork_choice .find_head::( justified_checkpoint, finalized_checkpoint, - &justified_state_balances, + &justified_balances, Hash256::zero(), &equivocating_indices, Slot::new(0), @@ -129,11 +132,14 @@ impl ForkChoiceTestDefinition { expected_head, proposer_boost_root, } => { + let justified_balances = + JustifiedBalances::from_effective_balances(justified_state_balances) + .unwrap(); let head = fork_choice .find_head::( justified_checkpoint, finalized_checkpoint, - &justified_state_balances, + &justified_balances, proposer_boost_root, &equivocating_indices, Slot::new(0), @@ -155,10 +161,13 @@ impl ForkChoiceTestDefinition { finalized_checkpoint, justified_state_balances, } => { + let justified_balances = + JustifiedBalances::from_effective_balances(justified_state_balances) + .unwrap(); let result = fork_choice.find_head::( justified_checkpoint, finalized_checkpoint, - &justified_state_balances, + &justified_balances, Hash256::zero(), &equivocating_indices, Slot::new(0), diff --git a/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs b/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs index f1b0e512d7d..ede5bb39481 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs @@ -999,7 +999,7 @@ pub fn get_execution_status_test_definition_03() -> ForkChoiceTestDefinition { }); ops.push(Operation::AssertWeight { block_root: get_root(3), - // This is a "magic number" generated from `calculate_proposer_boost`. + // This is a "magic number" generated from `calculate_committee_fraction`. 
weight: 31_000, }); diff --git a/consensus/proto_array/src/justified_balances.rs b/consensus/proto_array/src/justified_balances.rs new file mode 100644 index 00000000000..75f6c2f7c80 --- /dev/null +++ b/consensus/proto_array/src/justified_balances.rs @@ -0,0 +1,62 @@ +use safe_arith::{ArithError, SafeArith}; +use types::{BeaconState, EthSpec}; + +#[derive(Debug, PartialEq, Clone, Default)] +pub struct JustifiedBalances { + /// The effective balances for every validator in a given justified state. + /// + /// Any validator who is not active in the epoch of the justified state is assigned a balance of + /// zero. + pub effective_balances: Vec, + /// The sum of `self.effective_balances`. + pub total_effective_balance: u64, + /// The number of active validators included in `self.effective_balances`. + pub num_active_validators: u64, +} + +impl JustifiedBalances { + pub fn from_justified_state(state: &BeaconState) -> Result { + let current_epoch = state.current_epoch(); + let mut total_effective_balance = 0u64; + let mut num_active_validators = 0u64; + + let effective_balances = state + .validators() + .iter() + .map(|validator| { + if validator.is_active_at(current_epoch) { + total_effective_balance.safe_add_assign(validator.effective_balance)?; + num_active_validators.safe_add_assign(1)?; + + Ok(validator.effective_balance) + } else { + Ok(0) + } + }) + .collect::, _>>()?; + + Ok(Self { + effective_balances, + total_effective_balance, + num_active_validators, + }) + } + + pub fn from_effective_balances(effective_balances: Vec) -> Result { + let mut total_effective_balance = 0; + let mut num_active_validators = 0; + + for &balance in &effective_balances { + if balance != 0 { + total_effective_balance.safe_add_assign(balance)?; + num_active_validators.safe_add_assign(1)?; + } + } + + Ok(Self { + effective_balances, + total_effective_balance, + num_active_validators, + }) + } +} diff --git a/consensus/proto_array/src/lib.rs b/consensus/proto_array/src/lib.rs index 
e7bd9c0ed56..f2b29e1c7b2 100644 --- a/consensus/proto_array/src/lib.rs +++ b/consensus/proto_array/src/lib.rs @@ -1,11 +1,18 @@ mod error; pub mod fork_choice_test_definition; +mod justified_balances; mod proto_array; mod proto_array_fork_choice; mod ssz_container; -pub use crate::proto_array::{CountUnrealizedFull, InvalidationOperation}; -pub use crate::proto_array_fork_choice::{Block, ExecutionStatus, ProtoArrayForkChoice}; +pub use crate::justified_balances::JustifiedBalances; +pub use crate::proto_array::{ + calculate_committee_fraction, CountUnrealizedFull, InvalidationOperation, +}; +pub use crate::proto_array_fork_choice::{ + Block, DoNotReOrg, ExecutionStatus, ProposerHeadError, ProposerHeadInfo, ProtoArrayForkChoice, + ReOrgThreshold, +}; pub use error::Error; pub mod core { diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 590407d7eb8..add84f54787 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -1,5 +1,5 @@ use crate::error::InvalidBestNodeInfo; -use crate::{error::Error, Block, ExecutionStatus}; +use crate::{error::Error, Block, ExecutionStatus, JustifiedBalances}; use serde_derive::{Deserialize, Serialize}; use ssz::four_byte_option_impl; use ssz::Encode; @@ -169,7 +169,7 @@ impl ProtoArray { mut deltas: Vec, justified_checkpoint: Checkpoint, finalized_checkpoint: Checkpoint, - new_balances: &[u64], + new_justified_balances: &JustifiedBalances, proposer_boost_root: Hash256, current_slot: Slot, spec: &ChainSpec, @@ -241,9 +241,11 @@ impl ProtoArray { // Invalid nodes (or their ancestors) should not receive a proposer boost. 
&& !execution_status_is_invalid { - proposer_score = - calculate_proposer_boost::(new_balances, proposer_score_boost) - .ok_or(Error::ProposerBoostOverflow(node_index))?; + proposer_score = calculate_committee_fraction::( + new_justified_balances, + proposer_score_boost, + ) + .ok_or(Error::ProposerBoostOverflow(node_index))?; node_delta = node_delta .checked_add(proposer_score as i64) .ok_or(Error::DeltaOverflow(node_index))?; @@ -1006,32 +1008,19 @@ impl ProtoArray { } } -/// A helper method to calculate the proposer boost based on the given `validator_balances`. -/// This does *not* do any verification about whether a boost should or should not be applied. -/// The `validator_balances` array used here is assumed to be structured like the one stored in -/// the `BalancesCache`, where *effective* balances are stored and inactive balances are defaulted -/// to zero. -/// -/// Returns `None` if there is an overflow or underflow when calculating the score. +/// A helper method to calculate the proposer boost based on the given `justified_balances`. /// /// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#get_latest_attesting_balance -pub fn calculate_proposer_boost( - validator_balances: &[u64], +pub fn calculate_committee_fraction( + justified_balances: &JustifiedBalances, proposer_score_boost: u64, ) -> Option { - let mut total_balance: u64 = 0; - let mut num_validators: u64 = 0; - for &balance in validator_balances { - // We need to filter zero balances here to get an accurate active validator count. - // This is because we default inactive validator balances to zero when creating - // this balances array. 
- if balance != 0 { - total_balance = total_balance.checked_add(balance)?; - num_validators = num_validators.checked_add(1)?; - } - } - let average_balance = total_balance.checked_div(num_validators)?; - let committee_size = num_validators.checked_div(E::slots_per_epoch())?; + let average_balance = justified_balances + .total_effective_balance + .checked_div(justified_balances.num_active_validators)?; + let committee_size = justified_balances + .num_active_validators + .checked_div(E::slots_per_epoch())?; let committee_weight = committee_size.checked_mul(average_balance)?; committee_weight .checked_mul(proposer_score_boost)? diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 8f5d062ec6a..cbd369ae6ec 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -1,9 +1,12 @@ -use crate::error::Error; -use crate::proto_array::CountUnrealizedFull; -use crate::proto_array::{ - calculate_proposer_boost, InvalidationOperation, Iter, ProposerBoost, ProtoArray, ProtoNode, +use crate::{ + error::Error, + proto_array::{ + calculate_committee_fraction, CountUnrealizedFull, InvalidationOperation, Iter, + ProposerBoost, ProtoArray, ProtoNode, + }, + ssz_container::SszContainer, + JustifiedBalances, }; -use crate::ssz_container::SszContainer; use serde_derive::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; @@ -170,11 +173,128 @@ where } } +/// Information about the proposer head used for opportunistic re-orgs. +#[derive(Clone)] +pub struct ProposerHeadInfo { + /// Information about the *current* head block, which may be re-orged. + pub head_node: ProtoNode, + /// Information about the parent of the current head, which should be selected as the parent + /// for a new proposal *if* a re-org is decided on. 
+ pub parent_node: ProtoNode, + /// The computed fraction of the active committee balance below which we can re-org. + pub re_org_weight_threshold: u64, + /// The current slot from fork choice's point of view, may lead the wall-clock slot by upto + /// 500ms. + pub current_slot: Slot, +} + +/// Error type to enable short-circuiting checks in `get_proposer_head`. +/// +/// This type intentionally does not implement `Debug` so that callers are forced to handle the +/// enum. +#[derive(Clone, PartialEq)] +pub enum ProposerHeadError { + DoNotReOrg(DoNotReOrg), + Error(E), +} + +impl From for ProposerHeadError { + fn from(e: DoNotReOrg) -> ProposerHeadError { + Self::DoNotReOrg(e) + } +} + +impl From for ProposerHeadError { + fn from(e: Error) -> Self { + Self::Error(e) + } +} + +impl ProposerHeadError { + pub fn convert_inner_error(self) -> ProposerHeadError + where + E2: From, + { + self.map_inner_error(E2::from) + } + + pub fn map_inner_error(self, f: impl FnOnce(E1) -> E2) -> ProposerHeadError { + match self { + ProposerHeadError::DoNotReOrg(reason) => ProposerHeadError::DoNotReOrg(reason), + ProposerHeadError::Error(error) => ProposerHeadError::Error(f(error)), + } + } +} + +/// Reasons why a re-org should not be attempted. +/// +/// This type intentionally does not implement `Debug` so that the `Display` impl must be used. 
+#[derive(Clone, PartialEq)] +pub enum DoNotReOrg { + MissingHeadOrParentNode, + MissingHeadFinalizedCheckpoint, + ParentDistance, + HeadDistance, + ShufflingUnstable, + JustificationAndFinalizationNotCompetitive, + ChainNotFinalizing { + epochs_since_finalization: u64, + }, + HeadNotWeak { + head_weight: u64, + re_org_weight_threshold: u64, + }, + HeadNotLate, + NotProposing, + ReOrgsDisabled, +} + +impl std::fmt::Display for DoNotReOrg { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + match self { + Self::MissingHeadOrParentNode => write!(f, "unknown head or parent"), + Self::MissingHeadFinalizedCheckpoint => write!(f, "finalized checkpoint missing"), + Self::ParentDistance => write!(f, "parent too far from head"), + Self::HeadDistance => write!(f, "head too far from current slot"), + Self::ShufflingUnstable => write!(f, "shuffling unstable at epoch boundary"), + Self::JustificationAndFinalizationNotCompetitive => { + write!(f, "justification or finalization not competitive") + } + Self::ChainNotFinalizing { + epochs_since_finalization, + } => write!( + f, + "chain not finalizing ({epochs_since_finalization} epochs since finalization)" + ), + Self::HeadNotWeak { + head_weight, + re_org_weight_threshold, + } => { + write!(f, "head not weak ({head_weight}/{re_org_weight_threshold})") + } + Self::HeadNotLate => { + write!(f, "head arrived on time") + } + Self::NotProposing => { + write!(f, "not proposing at next slot") + } + Self::ReOrgsDisabled => { + write!(f, "re-orgs disabled in config") + } + } + } +} + +/// New-type for the re-org threshold percentage. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +#[serde(transparent)] +pub struct ReOrgThreshold(pub u64); + #[derive(PartialEq)] pub struct ProtoArrayForkChoice { pub(crate) proto_array: ProtoArray, pub(crate) votes: ElasticList, - pub(crate) balances: Vec, + pub(crate) balances: JustifiedBalances, } impl ProtoArrayForkChoice { @@ -223,7 +343,7 @@ impl ProtoArrayForkChoice { Ok(Self { proto_array, votes: ElasticList::default(), - balances: vec![], + balances: JustifiedBalances::default(), }) } @@ -282,21 +402,20 @@ impl ProtoArrayForkChoice { &mut self, justified_checkpoint: Checkpoint, finalized_checkpoint: Checkpoint, - justified_state_balances: &[u64], + justified_state_balances: &JustifiedBalances, proposer_boost_root: Hash256, equivocating_indices: &BTreeSet, current_slot: Slot, spec: &ChainSpec, ) -> Result { let old_balances = &mut self.balances; - let new_balances = justified_state_balances; let deltas = compute_deltas( &self.proto_array.indices, &mut self.votes, - old_balances, - new_balances, + &old_balances.effective_balances, + &new_balances.effective_balances, equivocating_indices, ) .map_err(|e| format!("find_head compute_deltas failed: {:?}", e))?; @@ -313,13 +432,129 @@ impl ProtoArrayForkChoice { ) .map_err(|e| format!("find_head apply_score_changes failed: {:?}", e))?; - *old_balances = new_balances.to_vec(); + *old_balances = new_balances.clone(); self.proto_array .find_head::(&justified_checkpoint.root, current_slot) .map_err(|e| format!("find_head failed: {:?}", e)) } + /// Get the block to propose on during `current_slot`. + /// + /// This function returns a *definitive* result which should be acted on. 
+ pub fn get_proposer_head( + &self, + current_slot: Slot, + canonical_head: Hash256, + justified_balances: &JustifiedBalances, + re_org_threshold: ReOrgThreshold, + max_epochs_since_finalization: Epoch, + ) -> Result> { + let info = self.get_proposer_head_info::( + current_slot, + canonical_head, + justified_balances, + re_org_threshold, + max_epochs_since_finalization, + )?; + + // Only re-org a single slot. This prevents cascading failures during asynchrony. + let head_slot_ok = info.head_node.slot + 1 == current_slot; + if !head_slot_ok { + return Err(DoNotReOrg::HeadDistance.into()); + } + + // Only re-org if the head's weight is less than the configured committee fraction. + let head_weight = info.head_node.weight; + let re_org_weight_threshold = info.re_org_weight_threshold; + let weak_head = head_weight < re_org_weight_threshold; + if !weak_head { + return Err(DoNotReOrg::HeadNotWeak { + head_weight, + re_org_weight_threshold, + } + .into()); + } + + // All checks have passed, build upon the parent to re-org the head. + Ok(info) + } + + /// Get information about the block to propose on during `current_slot`. + /// + /// This function returns a *partial* result which must be processed further. + pub fn get_proposer_head_info( + &self, + current_slot: Slot, + canonical_head: Hash256, + justified_balances: &JustifiedBalances, + re_org_threshold: ReOrgThreshold, + max_epochs_since_finalization: Epoch, + ) -> Result> { + let mut nodes = self + .proto_array + .iter_nodes(&canonical_head) + .take(2) + .cloned() + .collect::>(); + + let parent_node = nodes.pop().ok_or(DoNotReOrg::MissingHeadOrParentNode)?; + let head_node = nodes.pop().ok_or(DoNotReOrg::MissingHeadOrParentNode)?; + + let parent_slot = parent_node.slot; + let head_slot = head_node.slot; + let re_org_block_slot = head_slot + 1; + + // Check finalization distance. 
+ let proposal_epoch = re_org_block_slot.epoch(E::slots_per_epoch()); + let finalized_epoch = head_node + .unrealized_finalized_checkpoint + .ok_or(DoNotReOrg::MissingHeadFinalizedCheckpoint)? + .epoch; + let epochs_since_finalization = proposal_epoch.saturating_sub(finalized_epoch).as_u64(); + if epochs_since_finalization > max_epochs_since_finalization.as_u64() { + return Err(DoNotReOrg::ChainNotFinalizing { + epochs_since_finalization, + } + .into()); + } + + // Check parent distance from head. + // Do not check head distance from current slot, as that condition needs to be + // late-evaluated and is elided when `current_slot == head_slot`. + let parent_slot_ok = parent_slot + 1 == head_slot; + if !parent_slot_ok { + return Err(DoNotReOrg::ParentDistance.into()); + } + + // Check shuffling stability. + let shuffling_stable = re_org_block_slot % E::slots_per_epoch() != 0; + if !shuffling_stable { + return Err(DoNotReOrg::ShufflingUnstable.into()); + } + + // Check FFG. + let ffg_competitive = parent_node.unrealized_justified_checkpoint + == head_node.unrealized_justified_checkpoint + && parent_node.unrealized_finalized_checkpoint + == head_node.unrealized_finalized_checkpoint; + if !ffg_competitive { + return Err(DoNotReOrg::JustificationAndFinalizationNotCompetitive.into()); + } + + // Compute re-org weight threshold. + let re_org_weight_threshold = + calculate_committee_fraction::(justified_balances, re_org_threshold.0) + .ok_or(Error::ReOrgThresholdOverflow)?; + + Ok(ProposerHeadInfo { + head_node, + parent_node, + re_org_weight_threshold, + current_slot, + }) + } + /// Returns `true` if there are any blocks in `self` with an `INVALID` execution payload status. /// /// This will operate on *all* blocks, even those that do not descend from the finalized @@ -368,7 +603,7 @@ impl ProtoArrayForkChoice { if vote.current_root == node.root { // Any voting validator that does not have a balance should be // ignored. This is consistent with `compute_deltas`. 
- self.balances.get(validator_index) + self.balances.effective_balances.get(validator_index) } else { None } @@ -382,9 +617,11 @@ impl ProtoArrayForkChoice { // Compute the score based upon the current balances. We can't rely on // the `previous_proposr_boost.score` since it is set to zero with an // invalid node. - let proposer_score = - calculate_proposer_boost::(&self.balances, proposer_score_boost) - .ok_or("Failed to compute proposer boost")?; + let proposer_score = calculate_committee_fraction::( + &self.balances, + proposer_score_boost, + ) + .ok_or("Failed to compute proposer boost")?; // Store the score we've applied here so it can be removed in // a later call to `apply_score_changes`. self.proto_array.previous_proposer_boost.score = proposer_score; @@ -538,10 +775,11 @@ impl ProtoArrayForkChoice { bytes: &[u8], count_unrealized_full: CountUnrealizedFull, ) -> Result { - SszContainer::from_ssz_bytes(bytes) - .map(|container| (container, count_unrealized_full)) - .map(Into::into) - .map_err(|e| format!("Failed to decode ProtoArrayForkChoice: {:?}", e)) + let container = SszContainer::from_ssz_bytes(bytes) + .map_err(|e| format!("Failed to decode ProtoArrayForkChoice: {:?}", e))?; + (container, count_unrealized_full) + .try_into() + .map_err(|e| format!("Failed to initialize ProtoArrayForkChoice: {e:?}")) } /// Returns a read-lock to core `ProtoArray` struct. 
diff --git a/consensus/proto_array/src/ssz_container.rs b/consensus/proto_array/src/ssz_container.rs index 63f75ed0a2f..1a20ef967ad 100644 --- a/consensus/proto_array/src/ssz_container.rs +++ b/consensus/proto_array/src/ssz_container.rs @@ -2,10 +2,12 @@ use crate::proto_array::ProposerBoost; use crate::{ proto_array::{CountUnrealizedFull, ProtoArray, ProtoNode}, proto_array_fork_choice::{ElasticList, ProtoArrayForkChoice, VoteTracker}, + Error, JustifiedBalances, }; use ssz::{four_byte_option_impl, Encode}; use ssz_derive::{Decode, Encode}; use std::collections::HashMap; +use std::convert::TryFrom; use types::{Checkpoint, Hash256}; // Define a "legacy" implementation of `Option` which uses four bytes for encoding the union @@ -30,7 +32,7 @@ impl From<&ProtoArrayForkChoice> for SszContainer { Self { votes: from.votes.0.clone(), - balances: from.balances.clone(), + balances: from.balances.effective_balances.clone(), prune_threshold: proto_array.prune_threshold, justified_checkpoint: proto_array.justified_checkpoint, finalized_checkpoint: proto_array.finalized_checkpoint, @@ -41,8 +43,12 @@ impl From<&ProtoArrayForkChoice> for SszContainer { } } -impl From<(SszContainer, CountUnrealizedFull)> for ProtoArrayForkChoice { - fn from((from, count_unrealized_full): (SszContainer, CountUnrealizedFull)) -> Self { +impl TryFrom<(SszContainer, CountUnrealizedFull)> for ProtoArrayForkChoice { + type Error = Error; + + fn try_from( + (from, count_unrealized_full): (SszContainer, CountUnrealizedFull), + ) -> Result { let proto_array = ProtoArray { prune_threshold: from.prune_threshold, justified_checkpoint: from.justified_checkpoint, @@ -53,10 +59,10 @@ impl From<(SszContainer, CountUnrealizedFull)> for ProtoArrayForkChoice { count_unrealized_full, }; - Self { + Ok(Self { proto_array, votes: ElasticList(from.votes), - balances: from.balances, - } + balances: JustifiedBalances::from_effective_balances(from.balances)?, + }) } } diff --git a/lcli/src/main.rs b/lcli/src/main.rs index 
9d548b0499a..de6039f35a0 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -597,6 +597,14 @@ fn main() { .takes_value(true) .help("The genesis time when generating a genesis state."), ) + .arg( + Arg::with_name("proposer-score-boost") + .long("proposer-score-boost") + .value_name("INTEGER") + .takes_value(true) + .help("The proposer score boost to apply as a percentage, e.g. 70 = 70%"), + ) + ) .subcommand( SubCommand::with_name("check-deposit-data") diff --git a/lcli/src/new_testnet.rs b/lcli/src/new_testnet.rs index 5254ff5a62e..b2760829cb8 100644 --- a/lcli/src/new_testnet.rs +++ b/lcli/src/new_testnet.rs @@ -58,6 +58,10 @@ pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Resul spec.genesis_fork_version = v; } + if let Some(proposer_score_boost) = parse_optional(matches, "proposer-score-boost")? { + spec.proposer_score_boost = Some(proposer_score_boost); + } + if let Some(fork_epoch) = parse_optional(matches, "altair-fork-epoch")? { spec.altair_fork_epoch = Some(fork_epoch); } diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index d39235cb136..07c583da5cb 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -1,6 +1,9 @@ use beacon_node::{beacon_chain::CountUnrealizedFull, ClientConfig as Config}; use crate::exec::{CommandLineTestExec, CompletedTest}; +use beacon_node::beacon_chain::chain_config::{ + DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, DEFAULT_RE_ORG_THRESHOLD, +}; use eth1::Eth1Endpoint; use lighthouse_network::PeerId; use std::fs::File; @@ -10,6 +13,7 @@ use std::path::PathBuf; use std::process::Command; use std::str::FromStr; use std::string::ToString; +use std::time::Duration; use tempfile::TempDir; use types::{Address, Checkpoint, Epoch, ExecutionBlockHash, ForkName, Hash256, MainnetEthSpec}; use unused_port::{unused_tcp_port, unused_udp_port}; @@ -153,6 +157,31 @@ fn checkpoint_sync_url_timeout_default() { }); } +#[test] +fn prepare_payload_lookahead_default() { + 
CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.chain.prepare_payload_lookahead, + Duration::from_secs(4), + ) + }); +} + +#[test] +fn prepare_payload_lookahead_shorter() { + CommandLineTest::new() + .flag("prepare-payload-lookahead", Some("1500")) + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.chain.prepare_payload_lookahead, + Duration::from_millis(1500) + ) + }); +} + #[test] fn paranoid_block_proposal_default() { CommandLineTest::new() @@ -1500,6 +1529,49 @@ fn ensure_panic_on_failed_launch() { }); } +#[test] +fn enable_proposer_re_orgs_default() { + CommandLineTest::new().run().with_config(|config| { + assert_eq!( + config.chain.re_org_threshold, + Some(DEFAULT_RE_ORG_THRESHOLD) + ); + assert_eq!( + config.chain.re_org_max_epochs_since_finalization, + DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, + ); + }); +} + +#[test] +fn disable_proposer_re_orgs() { + CommandLineTest::new() + .flag("disable-proposer-reorgs", None) + .run() + .with_config(|config| assert_eq!(config.chain.re_org_threshold, None)); +} + +#[test] +fn proposer_re_org_threshold() { + CommandLineTest::new() + .flag("proposer-reorg-threshold", Some("90")) + .run() + .with_config(|config| assert_eq!(config.chain.re_org_threshold.unwrap().0, 90)); +} + +#[test] +fn proposer_re_org_max_epochs_since_finalization() { + CommandLineTest::new() + .flag("proposer-reorg-epochs-since-finalization", Some("8")) + .run() + .with_config(|config| { + assert_eq!( + config.chain.re_org_max_epochs_since_finalization.as_u64(), + 8 + ) + }); +} + #[test] fn monitoring_endpoint() { CommandLineTest::new() diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index 6608b7ca64f..f0ed4f737d4 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -389,6 +389,24 @@ fn no_doppelganger_protection_flag() { .with_config(|config| assert!(!config.enable_doppelganger_protection)); } 
#[test] +fn block_delay_ms() { + CommandLineTest::new() + .flag("block-delay-ms", Some("2000")) + .run() + .with_config(|config| { + assert_eq!( + config.block_delay, + Some(std::time::Duration::from_millis(2000)) + ) + }); +} +#[test] +fn no_block_delay_ms() { + CommandLineTest::new() + .run() + .with_config(|config| assert_eq!(config.block_delay, None)); +} +#[test] fn no_gas_limit_flag() { CommandLineTest::new() .run() diff --git a/scripts/local_testnet/setup.sh b/scripts/local_testnet/setup.sh index a1348363a9b..82336984afb 100755 --- a/scripts/local_testnet/setup.sh +++ b/scripts/local_testnet/setup.sh @@ -36,6 +36,7 @@ lcli \ --eth1-follow-distance 1 \ --seconds-per-slot $SECONDS_PER_SLOT \ --seconds-per-eth1-block $SECONDS_PER_ETH1_BLOCK \ + --proposer-score-boost "$PROPOSER_SCORE_BOOST" \ --force echo Specification generated at $TESTNET_DIR. diff --git a/scripts/local_testnet/vars.env b/scripts/local_testnet/vars.env index b6ea89794f0..2506e9e1cdf 100644 --- a/scripts/local_testnet/vars.env +++ b/scripts/local_testnet/vars.env @@ -44,5 +44,8 @@ SECONDS_PER_SLOT=3 # Seconds per Eth1 block SECONDS_PER_ETH1_BLOCK=1 +# Proposer score boost percentage +PROPOSER_SCORE_BOOST=70 + # Command line arguments for validator client VC_ARGS="" diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index f0d2c9081f0..d4acbe7563d 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -12,7 +12,9 @@ use slog::{crit, debug, error, info, trace, warn}; use slot_clock::SlotClock; use std::ops::Deref; use std::sync::Arc; +use std::time::Duration; use tokio::sync::mpsc; +use tokio::time::sleep; use types::{BlindedPayload, BlockType, EthSpec, ExecPayload, FullPayload, PublicKeyBytes, Slot}; #[derive(Debug)] @@ -44,6 +46,7 @@ pub struct BlockServiceBuilder { context: Option>, graffiti: Option, graffiti_file: Option, + block_delay: Option, } impl BlockServiceBuilder { @@ -55,6 +58,7 @@ impl 
BlockServiceBuilder { context: None, graffiti: None, graffiti_file: None, + block_delay: None, } } @@ -88,6 +92,11 @@ impl BlockServiceBuilder { self } + pub fn block_delay(mut self, block_delay: Option) -> Self { + self.block_delay = block_delay; + self + } + pub fn build(self) -> Result, String> { Ok(BlockService { inner: Arc::new(Inner { @@ -105,6 +114,7 @@ impl BlockServiceBuilder { .ok_or("Cannot build BlockService without runtime_context")?, graffiti: self.graffiti, graffiti_file: self.graffiti_file, + block_delay: self.block_delay, }), }) } @@ -118,6 +128,7 @@ pub struct Inner { context: RuntimeContext, graffiti: Option, graffiti_file: Option, + block_delay: Option, } /// Attempts to produce attestations for any block producer(s) at the start of the epoch. @@ -162,6 +173,16 @@ impl BlockService { async move { while let Some(notif) = notification_rx.recv().await { let service = self.clone(); + + if let Some(delay) = service.block_delay { + debug!( + service.context.log(), + "Delaying block production by {}ms", + delay.as_millis() + ); + sleep(delay).await; + } + service.do_update(notif).await.ok(); } debug!(log, "Block service shutting down"); diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index ef2e66676a5..c82a1a9d362 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -308,5 +308,17 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { by this validator client. Note this will not necessarily be used if the gas limit \ set here moves too far from the previous block's gas limit. [default: 30,000,000]") .requires("builder-proposals"), - ) + ) + /* + * Experimental/development options. + */ + .arg( + Arg::with_name("block-delay-ms") + .long("block-delay-ms") + .value_name("MILLIS") + .hidden(true) + .help("Time to delay block production from the start of the slot. 
Should only be \ + used for testing.") + .takes_value(true), + ) } diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 277a4bd8ded..22741dabbd7 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -13,6 +13,7 @@ use slog::{info, warn, Logger}; use std::fs; use std::net::IpAddr; use std::path::PathBuf; +use std::time::Duration; use types::{Address, GRAFFITI_BYTES_LEN}; pub const DEFAULT_BEACON_NODE: &str = "http://localhost:5052/"; @@ -61,6 +62,10 @@ pub struct Config { /// A list of custom certificates that the validator client will additionally use when /// connecting to a beacon node over SSL/TLS. pub beacon_nodes_tls_certs: Option>, + /// Delay from the start of the slot to wait before publishing a block. + /// + /// This is *not* recommended in prod and should only be used for testing. + pub block_delay: Option, /// Disables publishing http api requests to all beacon nodes for select api calls. pub disable_run_on_all: bool, } @@ -95,6 +100,7 @@ impl Default for Config { monitoring_api: None, enable_doppelganger_protection: false, beacon_nodes_tls_certs: None, + block_delay: None, builder_proposals: false, builder_registration_timestamp_override: None, gas_limit: None, @@ -341,6 +347,13 @@ impl Config { ); } + /* + * Experimental + */ + if let Some(delay_ms) = parse_optional::(cli_args, "block-delay-ms")? 
{ + config.block_delay = Some(Duration::from_millis(delay_ms)); + } + Ok(config) } } diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 819efec93c9..4db9804054a 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -427,6 +427,7 @@ impl ProductionValidatorClient { .runtime_context(context.service_context("block".into())) .graffiti(config.graffiti) .graffiti_file(config.graffiti_file.clone()) + .block_delay(config.block_delay) .build()?; let attestation_service = AttestationServiceBuilder::new() From 023674ab3b49a4736431c9d3224b66102798ca19 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 14 Dec 2022 01:20:02 +0000 Subject: [PATCH 08/23] Update Gnosis chain bootnodes (#3793) ## Proposed Changes Update the Gnosis chain bootnodes. The current list of Gnosis bootnodes were abandoned at some point before the Gnosis merge and are now failing to bootstrap peers. There's a workaround list of bootnodes here: https://docs.gnosischain.com/updates/20221208-temporary-bootnodes The list from this PR represents the long-term bootnodes run by the Gnosis team. We will also try to set up SigP bootnodes for Gnosis chain at some point. 
--- .../built_in_network_configs/gnosis/boot_enr.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/common/eth2_network_config/built_in_network_configs/gnosis/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/gnosis/boot_enr.yaml index 4b232d8b324..130c1fa1c32 100644 --- a/common/eth2_network_config/built_in_network_configs/gnosis/boot_enr.yaml +++ b/common/eth2_network_config/built_in_network_configs/gnosis/boot_enr.yaml @@ -1,5 +1,5 @@ # Gnosis Chain Team -- enr:-IS4QGmLwm7gFd0L0CEisllrb1op3v-wAGSc7_pwSMGgN3bOS9Fz7m1dWbwuuPHKqeETz9MbhjVuoWk0ohkyRv98kVoBgmlkgnY0gmlwhGjtlgaJc2VjcDI1NmsxoQLMdh0It9fJbuiLydZ9fpF6MRzgNle0vODaDiMqhbC7WIN1ZHCCIyg -- enr:-IS4QFUVG3dvLPCUEI7ycRvFm0Ieg_ITa5tALmJ9LI7dJ6ieT3J4fF9xLRjOoB4ApV-Rjp7HeLKzyTWG1xRdbFBNZPQBgmlkgnY0gmlwhErP5weJc2VjcDI1NmsxoQOBbaJBvx0-w_pyZUhQl9A510Ho2T0grE0K8JevzES99IN1ZHCCIyg -- enr:-Ku4QOQk8V-Hu2gxFzRXmLYIO4AvWDZhoMFwTf3n3DYm_mbsWv0ZitoqiN6JZUUj6Li6e1Jk1w2zFSVHKPMUP1g5tsgBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD5Jd3FAAAAZP__________gmlkgnY0gmlwhC1PTpmJc2VjcDI1NmsxoQL1Ynt5PoA0UOcHa1Rfn98rmnRlLzNuWTePPP4m4qHVroN1ZHCCKvg -- enr:-Ku4QFaTwgoms-EiiRIfHUH3FXprWUFgjHg4UuWvilqoUQtDbmTszVIxUEOwQUmA2qkiP-T9wXjc_rVUuh9cU7WgwbgBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD5Jd3FAAAAZP__________gmlkgnY0gmlwhC0hBmCJc2VjcDI1NmsxoQOpsg1XCrXmCwZKcSTcycLwldoKUMHPUpMEVGeg_EEhuYN1ZHCCKvg +- enr:-Ly4QMU1y81COwm1VZgxGF4_eZ21ub9-GHF6dXZ29aEJ0oZpcV2Rysw-viaEKfpcpu9ZarILJLxFZjcKOjE0Sybs3MQBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCCS-QxAgAAZP__________gmlkgnY0gmlwhANLnx-Jc2VjcDI1NmsxoQKoaYT8I-wf2I_f_ii6EgoSSXj5T3bhiDyW-7ZLsY3T64hzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA +- enr:-Ly4QBf76jLiCA_pDXoZjhyRbuwzFOscFY-MIKkPnmHPQbvaKhIDZutfe38G9ibzgQP0RKrTo3vcWOy4hf_8wOZ-U5MBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCCS-QxAgAAZP__________gmlkgnY0gmlwhBLGgjaJc2VjcDI1NmsxoQLGeo0Q4lDvxIjHjnkAqEuETTaFIjsNrEcSpdDhcHXWFYhzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA +- 
enr:-Ly4QLjZUWdqUO_RwyDqCAccIK5-MbLRD6A2c7oBuVbBgBnWDkEf0UKJVAaJqi2pO101WVQQLYSnYgz1Q3pRhYdrlFoBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCCS-QxAgAAZP__________gmlkgnY0gmlwhANA8sSJc2VjcDI1NmsxoQK4TC_EK1jSs0VVPUpOjIo1rhJmff2SLBPFOWSXMwdLVYhzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA +- enr:-Ly4QKwX2rTFtKWKQHSGQFhquxsxL1jewO8JB1MG-jgHqAZVFWxnb3yMoQqnYSV1bk25-_jiLuhIulxar3RBWXEDm6EBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCCS-QxAgAAZP__________gmlkgnY0gmlwhAN-qZeJc2VjcDI1NmsxoQI7EPGMpecl0QofLp4Wy_lYNCCChUFEH6kY7k-oBGkPFIhzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA From 63c74b37f4805aee853f8be8d9e2e27886ba898f Mon Sep 17 00:00:00 2001 From: Divma Date: Thu, 15 Dec 2022 00:16:38 +0000 Subject: [PATCH 09/23] send error answering bbrange requests when an error occurrs (#3800) ## Issue Addressed While testing withdrawals with @ethDreamer we noticed lighthouse is sending empty batches when an error occurs. As LH peer receiving this, we would consider this a low tolerance action because the peer is claiming the batch is right and is empty. ## Proposed Changes If any kind of error occurs, send a error response instead ## Additional Info Right now we don't handle such thing as a partial batch with an error. If an error is received, the whole batch is discarded. Because of this it makes little sense to send partial batches that end with an error, so it's better to do the proposed solution instead of sending empty batches. 
--- .../network/src/beacon_processor/worker/rpc_methods.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs index 3e354a70d21..bfa0ea516fa 100644 --- a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs @@ -398,6 +398,15 @@ impl Worker { "block_root" => ?root, "error" => ?e ); + + // send the stream terminator + self.send_error_response( + peer_id, + RPCResponseErrorCode::ServerError, + "Failed fetching blocks".into(), + request_id, + ); + send_response = false; break; } } From ffbf70e2d9fb1fabc5768453d19878b05e69405d Mon Sep 17 00:00:00 2001 From: Divma Date: Fri, 16 Dec 2022 04:04:00 +0000 Subject: [PATCH 10/23] Clippy lints for rust 1.66 (#3810) ## Issue Addressed Fixes the new clippy lints for rust 1.66 ## Proposed Changes Most of the changes come from: - [unnecessary_cast](https://rust-lang.github.io/rust-clippy/master/index.html#unnecessary_cast) - [iter_kv_map](https://rust-lang.github.io/rust-clippy/master/index.html#iter_kv_map) - [needless_borrow](https://rust-lang.github.io/rust-clippy/master/index.html#needless_borrow) ## Additional Info na --- account_manager/src/wallet/list.rs | 2 +- .../src/naive_aggregation_pool.rs | 8 ++------ beacon_node/beacon_chain/src/test_utils.rs | 2 +- .../beacon_chain/src/validator_monitor.rs | 5 +---- beacon_node/eth1/src/deposit_cache.rs | 6 +++--- .../execution_layer/src/engine_api/auth.rs | 2 +- beacon_node/http_api/src/lib.rs | 4 ++-- .../src/peer_manager/peerdb/score.rs | 9 +-------- .../src/rpc/codec/ssz_snappy.rs | 2 +- .../service/gossipsub_scoring_parameters.rs | 4 ++-- .../lighthouse_network/src/service/utils.rs | 2 +- beacon_node/operation_pool/src/attestation.rs | 2 +- beacon_node/store/src/chunked_vector.rs | 2 +- .../src/validator_definitions.rs | 2 +- common/validator_dir/src/builder.rs | 2 +- 
consensus/serde_utils/src/hex.rs | 6 +++--- consensus/serde_utils/src/u64_hex_be.rs | 2 +- .../altair/sync_committee.rs | 4 ++-- .../altair/rewards_and_penalties.rs | 6 +++--- .../base/rewards_and_penalties.rs | 2 +- consensus/types/src/beacon_state.rs | 2 +- .../types/src/beacon_state/committee_cache.rs | 2 +- consensus/types/src/beacon_state/tests.rs | 20 +++++-------------- consensus/types/src/preset.rs | 2 +- slasher/tests/attester_slashings.rs | 20 +++++++++---------- .../src/nethermind.rs | 2 +- testing/node_test_rig/src/lib.rs | 2 +- validator_client/src/doppelganger_service.rs | 10 +++++----- validator_client/src/http_api/mod.rs | 2 +- .../src/initialized_validators.rs | 2 +- validator_client/src/key_cache.rs | 2 +- 31 files changed, 58 insertions(+), 82 deletions(-) diff --git a/account_manager/src/wallet/list.rs b/account_manager/src/wallet/list.rs index 5b671b1dcec..9190de3915d 100644 --- a/account_manager/src/wallet/list.rs +++ b/account_manager/src/wallet/list.rs @@ -10,7 +10,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { } pub fn cli_run(wallet_base_dir: PathBuf) -> Result<(), String> { - let mgr = WalletManager::open(&wallet_base_dir) + let mgr = WalletManager::open(wallet_base_dir) .map_err(|e| format!("Unable to open --{}: {:?}", WALLETS_DIR_FLAG, e))?; for (name, _uuid) in mgr diff --git a/beacon_node/beacon_chain/src/naive_aggregation_pool.rs b/beacon_node/beacon_chain/src/naive_aggregation_pool.rs index 252e9915d80..7eeb9bb56f6 100644 --- a/beacon_node/beacon_chain/src/naive_aggregation_pool.rs +++ b/beacon_node/beacon_chain/src/naive_aggregation_pool.rs @@ -402,7 +402,7 @@ impl NaiveAggregationPool { /// Returns the total number of items stored in `self`. pub fn num_items(&self) -> usize { - self.maps.iter().map(|(_, map)| map.len()).sum() + self.maps.values().map(T::len).sum() } /// Returns an aggregated `T::Value` with the given `T::Data`, if any. 
@@ -448,11 +448,7 @@ impl NaiveAggregationPool { // If we have too many maps, remove the lowest amount to ensure we only have // `SLOTS_RETAINED` left. if self.maps.len() > SLOTS_RETAINED { - let mut slots = self - .maps - .iter() - .map(|(slot, _map)| *slot) - .collect::>(); + let mut slots = self.maps.keys().copied().collect::>(); // Sort is generally pretty slow, however `SLOTS_RETAINED` is quite low so it should be // negligible. slots.sort_unstable(); diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index d6e8787f4e0..9183583fb1b 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -1459,7 +1459,7 @@ where let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap(); let signed_block = block.sign( - &self.validator_keypairs[proposer_index as usize].sk, + &self.validator_keypairs[proposer_index].sk, &state.fork(), state.genesis_validators_root(), &self.spec, diff --git a/beacon_node/beacon_chain/src/validator_monitor.rs b/beacon_node/beacon_chain/src/validator_monitor.rs index 2d093ff886e..e95394bb78d 100644 --- a/beacon_node/beacon_chain/src/validator_monitor.rs +++ b/beacon_node/beacon_chain/src/validator_monitor.rs @@ -631,10 +631,7 @@ impl ValidatorMonitor { // Return the `id`'s of all monitored validators. pub fn get_all_monitored_validators(&self) -> Vec { - self.validators - .iter() - .map(|(_, val)| val.id.clone()) - .collect() + self.validators.values().map(|val| val.id.clone()).collect() } /// If `self.auto_register == true`, add the `validator_index` to `self.monitored_validators`. 
diff --git a/beacon_node/eth1/src/deposit_cache.rs b/beacon_node/eth1/src/deposit_cache.rs index ab07b380d1f..75391e58a0d 100644 --- a/beacon_node/eth1/src/deposit_cache.rs +++ b/beacon_node/eth1/src/deposit_cache.rs @@ -675,7 +675,7 @@ pub mod tests { #[test] fn test_finalization_boundaries() { let n = 8; - let half = (n / 2) as usize; + let half = n / 2; let mut deposit_cache = get_cache_with_deposits(n as u64); @@ -828,9 +828,9 @@ pub mod tests { // get_log(half+quarter) should return log with index `half+quarter` assert_eq!( q3_log_before_finalization.index, - (half + quarter) as u64, + half + quarter, "log index should be {}", - (half + quarter), + half + quarter, ); // get lower quarter of deposits with max deposit count diff --git a/beacon_node/execution_layer/src/engine_api/auth.rs b/beacon_node/execution_layer/src/engine_api/auth.rs index 8fcdb2543d6..2f4c0cd1e86 100644 --- a/beacon_node/execution_layer/src/engine_api/auth.rs +++ b/beacon_node/execution_layer/src/engine_api/auth.rs @@ -27,7 +27,7 @@ impl From for Error { /// Provides wrapper around `[u8; JWT_SECRET_LENGTH]` that implements `Zeroize`. #[derive(Zeroize, Clone)] #[zeroize(drop)] -pub struct JwtKey([u8; JWT_SECRET_LENGTH as usize]); +pub struct JwtKey([u8; JWT_SECRET_LENGTH]); impl JwtKey { /// Wrap given slice in `Self`. Returns an error if slice.len() != `JWT_SECRET_LENGTH`. 
diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index b018f9c7375..6cfdaf5db6a 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -2840,7 +2840,7 @@ pub fn serve( let is_live = chain.validator_seen_at_epoch(index as usize, request_data.epoch); api_types::LivenessResponseData { - index: index as u64, + index, epoch: request_data.epoch, is_live, } @@ -2876,7 +2876,7 @@ pub fn serve( .and_then( |sysinfo, app_start: std::time::Instant, data_dir, network_globals| { blocking_json_task(move || { - let app_uptime = app_start.elapsed().as_secs() as u64; + let app_uptime = app_start.elapsed().as_secs(); Ok(api_types::GenericResponse::from(observe_system_health_bn( sysinfo, data_dir, diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs index fca665db981..bafa355d687 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs @@ -186,14 +186,7 @@ impl RealScore { /// Add an f64 to the score abiding by the limits. fn add(&mut self, score: f64) { - let mut new_score = self.lighthouse_score + score; - if new_score > MAX_SCORE { - new_score = MAX_SCORE; - } - if new_score < MIN_SCORE { - new_score = MIN_SCORE; - } - + let new_score = (self.lighthouse_score + score).clamp(MIN_SCORE, MAX_SCORE); self.set_lighthouse_score(new_score); } diff --git a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs index a4dd602b3fd..eccbf0dd623 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs @@ -443,7 +443,7 @@ fn handle_length( // Note: length-prefix of > 10 bytes(uint64) would be a decoding error match uvi_codec.decode(bytes).map_err(RPCError::from)? 
{ Some(length) => { - *len = Some(length as usize); + *len = Some(length); Ok(Some(length)) } None => Ok(None), // need more bytes to decode length diff --git a/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs b/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs index 71a3953ece1..88becd686e5 100644 --- a/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs +++ b/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs @@ -270,11 +270,11 @@ impl PeerScoreSettings { let modulo_smaller = max( 1, - smaller_committee_size / self.target_aggregators_per_committee as usize, + smaller_committee_size / self.target_aggregators_per_committee, ); let modulo_larger = max( 1, - (smaller_committee_size + 1) / self.target_aggregators_per_committee as usize, + (smaller_committee_size + 1) / self.target_aggregators_per_committee, ); Ok(( diff --git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs index 09a8d1a8636..3f02e73c0ea 100644 --- a/beacon_node/lighthouse_network/src/service/utils.rs +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -88,7 +88,7 @@ fn keypair_from_hex(hex_bytes: &str) -> error::Result { hex_bytes.to_string() }; - hex::decode(&hex_bytes) + hex::decode(hex_bytes) .map_err(|e| format!("Failed to parse p2p secret key bytes: {:?}", e).into()) .and_then(keypair_from_bytes) } diff --git a/beacon_node/operation_pool/src/attestation.rs b/beacon_node/operation_pool/src/attestation.rs index 4af4edc0e44..fbbd5d7ddcf 100644 --- a/beacon_node/operation_pool/src/attestation.rs +++ b/beacon_node/operation_pool/src/attestation.rs @@ -49,7 +49,7 @@ impl<'a, T: EthSpec> AttMaxCover<'a, T> { let indices = get_attesting_indices::(committee.committee, &fresh_validators).ok()?; let fresh_validators_rewards: HashMap = indices .iter() - .map(|i| *i as u64) + .copied() .flat_map(|validator_index| { let reward = 
base::get_base_reward( state, diff --git a/beacon_node/store/src/chunked_vector.rs b/beacon_node/store/src/chunked_vector.rs index 25169b47902..8c64d4bcc05 100644 --- a/beacon_node/store/src/chunked_vector.rs +++ b/beacon_node/store/src/chunked_vector.rs @@ -801,7 +801,7 @@ mod test { fn needs_genesis_value_test_randao>(_: F) { let spec = &TestSpec::default_spec(); - let max = TestSpec::slots_per_epoch() as u64 * (F::Length::to_u64() - 1); + let max = TestSpec::slots_per_epoch() * (F::Length::to_u64() - 1); for i in 0..max { assert!( F::slot_needs_genesis_value(Slot::new(i), spec), diff --git a/common/account_utils/src/validator_definitions.rs b/common/account_utils/src/validator_definitions.rs index 66e3b735473..6ce2517fb2b 100644 --- a/common/account_utils/src/validator_definitions.rs +++ b/common/account_utils/src/validator_definitions.rs @@ -189,7 +189,7 @@ impl ValidatorDefinitions { .write(true) .read(true) .create_new(false) - .open(&config_path) + .open(config_path) .map_err(Error::UnableToOpenFile)?; serde_yaml::from_reader(file).map_err(Error::UnableToParseFile) } diff --git a/common/validator_dir/src/builder.rs b/common/validator_dir/src/builder.rs index 596c918b3f4..2b3f670c70f 100644 --- a/common/validator_dir/src/builder.rs +++ b/common/validator_dir/src/builder.rs @@ -196,7 +196,7 @@ impl<'a> Builder<'a> { if path.exists() { return Err(Error::DepositDataAlreadyExists(path)); } else { - let hex = format!("0x{}", hex::encode(&deposit_data)); + let hex = format!("0x{}", hex::encode(deposit_data)); File::options() .write(true) .read(true) diff --git a/consensus/serde_utils/src/hex.rs b/consensus/serde_utils/src/hex.rs index 1e6c02427f3..9a2cd65c764 100644 --- a/consensus/serde_utils/src/hex.rs +++ b/consensus/serde_utils/src/hex.rs @@ -63,15 +63,15 @@ mod test { #[test] fn encoding() { let bytes = vec![0, 255]; - let hex = encode(&bytes); + let hex = encode(bytes); assert_eq!(hex.as_str(), "0x00ff"); let bytes = vec![]; - let hex = encode(&bytes); + let 
hex = encode(bytes); assert_eq!(hex.as_str(), "0x"); let bytes = vec![1, 2, 3]; - let hex = encode(&bytes); + let hex = encode(bytes); assert_eq!(hex.as_str(), "0x010203"); } } diff --git a/consensus/serde_utils/src/u64_hex_be.rs b/consensus/serde_utils/src/u64_hex_be.rs index 6af8a75893c..e3364a2d2c9 100644 --- a/consensus/serde_utils/src/u64_hex_be.rs +++ b/consensus/serde_utils/src/u64_hex_be.rs @@ -36,7 +36,7 @@ impl<'de> Visitor<'de> for QuantityVisitor { } else if stripped.starts_with('0') { Err(de::Error::custom("cannot have leading zero")) } else if stripped.len() % 2 != 0 { - hex::decode(&format!("0{}", stripped)) + hex::decode(format!("0{}", stripped)) .map_err(|e| de::Error::custom(format!("invalid hex ({:?})", e))) } else { hex::decode(stripped).map_err(|e| de::Error::custom(format!("invalid hex ({:?})", e))) diff --git a/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs b/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs index 306e86714c6..a5dcd6e0b61 100644 --- a/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs +++ b/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs @@ -52,10 +52,10 @@ pub fn process_sync_aggregate( .zip(aggregate.sync_committee_bits.iter()) { if participation_bit { - increase_balance(state, participant_index as usize, participant_reward)?; + increase_balance(state, participant_index, participant_reward)?; increase_balance(state, proposer_index as usize, proposer_reward)?; } else { - decrease_balance(state, participant_index as usize, participant_reward)?; + decrease_balance(state, participant_index, participant_reward)?; } } diff --git a/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs b/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs index ccebbcb3a29..e2aa67a6193 100644 --- 
a/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs @@ -76,7 +76,7 @@ pub fn get_flag_index_deltas( let base_reward = get_base_reward(state, index, base_reward_per_increment, spec)?; let mut delta = Delta::default(); - if unslashed_participating_indices.contains(index as usize)? { + if unslashed_participating_indices.contains(index)? { if !state.is_in_inactivity_leak(previous_epoch, spec) { let reward_numerator = base_reward .safe_mul(weight)? @@ -89,8 +89,8 @@ pub fn get_flag_index_deltas( delta.penalize(base_reward.safe_mul(weight)?.safe_div(WEIGHT_DENOMINATOR)?)?; } deltas - .get_mut(index as usize) - .ok_or(Error::DeltaOutOfBounds(index as usize))? + .get_mut(index) + .ok_or(Error::DeltaOutOfBounds(index))? .combine(delta)?; } Ok(()) diff --git a/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs b/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs index 87e4261e0a1..e7a4d9c4dcd 100644 --- a/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs +++ b/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs @@ -235,7 +235,7 @@ fn get_inclusion_delay_delta( let max_attester_reward = base_reward.safe_sub(proposer_reward)?; delta.reward(max_attester_reward.safe_div(inclusion_info.delay)?)?; - let proposer_index = inclusion_info.proposer_index as usize; + let proposer_index = inclusion_info.proposer_index; Ok((delta, Some((proposer_index, proposer_delta)))) } else { Ok((Delta::default(), None)) diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 79625c12e31..12d44741f92 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -482,7 +482,7 @@ impl BeaconState { /// Spec v0.12.1 pub fn get_committee_count_at_slot(&self, slot: Slot) -> Result { let cache = 
self.committee_cache_at_slot(slot)?; - Ok(cache.committees_per_slot() as u64) + Ok(cache.committees_per_slot()) } /// Compute the number of committees in an entire epoch. diff --git a/consensus/types/src/beacon_state/committee_cache.rs b/consensus/types/src/beacon_state/committee_cache.rs index 7a526acc583..03adaf3d443 100644 --- a/consensus/types/src/beacon_state/committee_cache.rs +++ b/consensus/types/src/beacon_state/committee_cache.rs @@ -144,7 +144,7 @@ impl CommitteeCache { self.committees_per_slot as usize, index as usize, ); - let committee = self.compute_committee(committee_index as usize)?; + let committee = self.compute_committee(committee_index)?; Some(BeaconCommittee { slot, diff --git a/consensus/types/src/beacon_state/tests.rs b/consensus/types/src/beacon_state/tests.rs index 5898bfe214f..abca10e3726 100644 --- a/consensus/types/src/beacon_state/tests.rs +++ b/consensus/types/src/beacon_state/tests.rs @@ -344,12 +344,7 @@ mod committees { let cache_epoch = cache_epoch.into_epoch(state_epoch); - execute_committee_consistency_test( - new_head_state, - cache_epoch, - validator_count as usize, - spec, - ); + execute_committee_consistency_test(new_head_state, cache_epoch, validator_count, spec); } async fn committee_consistency_test_suite(cached_epoch: RelativeEpoch) { @@ -361,18 +356,13 @@ mod committees { .mul(spec.target_committee_size) .add(1); - committee_consistency_test::(validator_count as usize, Epoch::new(0), cached_epoch) - .await; + committee_consistency_test::(validator_count, Epoch::new(0), cached_epoch).await; - committee_consistency_test::( - validator_count as usize, - T::genesis_epoch() + 4, - cached_epoch, - ) - .await; + committee_consistency_test::(validator_count, T::genesis_epoch() + 4, cached_epoch) + .await; committee_consistency_test::( - validator_count as usize, + validator_count, T::genesis_epoch() + (T::slots_per_historical_root() as u64) .mul(T::slots_per_epoch()) diff --git a/consensus/types/src/preset.rs 
b/consensus/types/src/preset.rs index 8ee38e46a6d..fc5aa873006 100644 --- a/consensus/types/src/preset.rs +++ b/consensus/types/src/preset.rs @@ -202,7 +202,7 @@ mod test { } fn preset_from_file(preset_name: &str, filename: &str) -> T { - let f = File::open(&presets_base_path().join(preset_name).join(filename)) + let f = File::open(presets_base_path().join(preset_name).join(filename)) .expect("preset file exists"); serde_yaml::from_reader(f).unwrap() } diff --git a/slasher/tests/attester_slashings.rs b/slasher/tests/attester_slashings.rs index 5cf3fe6c2a3..40d9fa511c6 100644 --- a/slasher/tests/attester_slashings.rs +++ b/slasher/tests/attester_slashings.rs @@ -39,8 +39,8 @@ fn double_vote_multi_vals() { fn double_vote_some_vals() { let v1 = vec![0, 1, 2, 3, 4, 5, 6]; let v2 = vec![0, 2, 4, 6]; - let att1 = indexed_att(&v1, 0, 1, 0); - let att2 = indexed_att(&v2, 0, 1, 1); + let att1 = indexed_att(v1, 0, 1, 0); + let att2 = indexed_att(v2, 0, 1, 1); let slashings = hashset![att_slashing(&att1, &att2)]; let attestations = vec![att1, att2]; slasher_test_indiv(&attestations, &slashings, 1); @@ -53,9 +53,9 @@ fn double_vote_some_vals_repeat() { let v1 = vec![0, 1, 2, 3, 4, 5, 6]; let v2 = vec![0, 2, 4, 6]; let v3 = vec![1, 3, 5]; - let att1 = indexed_att(&v1, 0, 1, 0); - let att2 = indexed_att(&v2, 0, 1, 1); - let att3 = indexed_att(&v3, 0, 1, 0); + let att1 = indexed_att(v1, 0, 1, 0); + let att2 = indexed_att(v2, 0, 1, 1); + let att3 = indexed_att(v3, 0, 1, 0); let slashings = hashset![att_slashing(&att1, &att2)]; let attestations = vec![att1, att2, att3]; slasher_test_indiv(&attestations, &slashings, 1); @@ -67,8 +67,8 @@ fn double_vote_some_vals_repeat() { fn no_double_vote_same_target() { let v1 = vec![0, 1, 2, 3, 4, 5, 6]; let v2 = vec![0, 1, 2, 3, 4, 5, 7, 8]; - let att1 = indexed_att(&v1, 0, 1, 0); - let att2 = indexed_att(&v2, 0, 1, 0); + let att1 = indexed_att(v1, 0, 1, 0); + let att2 = indexed_att(v2, 0, 1, 0); let attestations = vec![att1, att2]; 
slasher_test_indiv(&attestations, &hashset! {}, 1); slasher_test_indiv(&attestations, &hashset! {}, 1000); @@ -79,8 +79,8 @@ fn no_double_vote_same_target() { fn no_double_vote_distinct_vals() { let v1 = vec![0, 1, 2, 3]; let v2 = vec![4, 5, 6, 7]; - let att1 = indexed_att(&v1, 0, 1, 0); - let att2 = indexed_att(&v2, 0, 1, 1); + let att1 = indexed_att(v1, 0, 1, 0); + let att2 = indexed_att(v2, 0, 1, 1); let attestations = vec![att1, att2]; slasher_test_indiv(&attestations, &hashset! {}, 1); slasher_test_indiv(&attestations, &hashset! {}, 1000); @@ -89,7 +89,7 @@ fn no_double_vote_distinct_vals() { #[test] fn no_double_vote_repeated() { let v = vec![0, 1, 2, 3, 4]; - let att1 = indexed_att(&v, 0, 1, 0); + let att1 = indexed_att(v, 0, 1, 0); let att2 = att1.clone(); let attestations = vec![att1, att2]; slasher_test_indiv(&attestations, &hashset! {}, 1); diff --git a/testing/execution_engine_integration/src/nethermind.rs b/testing/execution_engine_integration/src/nethermind.rs index f643fbd5f2c..740d87ab8ae 100644 --- a/testing/execution_engine_integration/src/nethermind.rs +++ b/testing/execution_engine_integration/src/nethermind.rs @@ -76,7 +76,7 @@ impl GenericExecutionEngine for NethermindEngine { fn init_datadir() -> TempDir { let datadir = TempDir::new().unwrap(); let genesis_json_path = datadir.path().join("genesis.json"); - let mut file = File::create(&genesis_json_path).unwrap(); + let mut file = File::create(genesis_json_path).unwrap(); let json = nethermind_genesis_json(); serde_json::to_writer(&mut file, &json).unwrap(); datadir diff --git a/testing/node_test_rig/src/lib.rs b/testing/node_test_rig/src/lib.rs index d0a4ef94917..82a60cda2f2 100644 --- a/testing/node_test_rig/src/lib.rs +++ b/testing/node_test_rig/src/lib.rs @@ -231,7 +231,7 @@ impl LocalExecutionNode { .tempdir() .expect("should create temp directory for client datadir"); let jwt_file_path = datadir.path().join("jwt.hex"); - if let Err(e) = std::fs::write(&jwt_file_path, 
config.jwt_key.hex_string()) { + if let Err(e) = std::fs::write(jwt_file_path, config.jwt_key.hex_string()) { panic!("Failed to write jwt file {}", e); } Self { diff --git a/validator_client/src/doppelganger_service.rs b/validator_client/src/doppelganger_service.rs index e6934ed48b0..558b9e199f0 100644 --- a/validator_client/src/doppelganger_service.rs +++ b/validator_client/src/doppelganger_service.rs @@ -441,7 +441,7 @@ impl DoppelgangerService { } // Get a list of indices to provide to the BN API. - let indices_only = indices_map.iter().map(|(index, _)| *index).collect(); + let indices_only = indices_map.keys().copied().collect(); // Pull the liveness responses from the BN. let request_epoch = request_slot.epoch(E::slots_per_epoch()); @@ -971,16 +971,16 @@ mod test { LivenessResponses { current_epoch_responses: detection_indices .iter() - .map(|i| LivenessResponseData { - index: *i as u64, + .map(|&index| LivenessResponseData { + index, epoch: current_epoch, is_live: false, }) .collect(), previous_epoch_responses: detection_indices .iter() - .map(|i| LivenessResponseData { - index: *i as u64, + .map(|&index| LivenessResponseData { + index, epoch: current_epoch - 1, is_live: false, }) diff --git a/validator_client/src/http_api/mod.rs b/validator_client/src/http_api/mod.rs index 600e7a4c683..b87bb083811 100644 --- a/validator_client/src/http_api/mod.rs +++ b/validator_client/src/http_api/mod.rs @@ -331,7 +331,7 @@ pub fn serve( .and(signer.clone()) .and_then(|sysinfo, app_start: std::time::Instant, val_dir, signer| { blocking_signed_json_task(signer, move || { - let app_uptime = app_start.elapsed().as_secs() as u64; + let app_uptime = app_start.elapsed().as_secs(); Ok(api_types::GenericResponse::from(observe_system_health_vc( sysinfo, val_dir, app_uptime, ))) diff --git a/validator_client/src/initialized_validators.rs b/validator_client/src/initialized_validators.rs index e8fe6ff2ff9..d3f6b1eb69a 100644 --- a/validator_client/src/initialized_validators.rs +++ 
b/validator_client/src/initialized_validators.rs @@ -472,7 +472,7 @@ impl InitializedValidators { /// Iterate through all voting public keys in `self` that should be used when querying for duties. pub fn iter_voting_pubkeys(&self) -> impl Iterator { - self.validators.iter().map(|(pubkey, _)| pubkey) + self.validators.keys() } /// Returns the voting `Keypair` for a given voting `PublicKey`, if all are true: diff --git a/validator_client/src/key_cache.rs b/validator_client/src/key_cache.rs index 2088aa6831d..b7abaaed069 100644 --- a/validator_client/src/key_cache.rs +++ b/validator_client/src/key_cache.rs @@ -104,7 +104,7 @@ impl KeyCache { let file = File::options() .read(true) .create_new(false) - .open(&cache_path) + .open(cache_path) .map_err(Error::UnableToOpenFile)?; serde_json::from_reader(file).map_err(Error::UnableToParseFile) } From 53aad18da3c1e0ab9ac6d6572eedab3419db0535 Mon Sep 17 00:00:00 2001 From: John Adler Date: Tue, 20 Dec 2022 01:34:51 +0000 Subject: [PATCH 11/23] docs: remove mention of phases in voluntary exits (#3776) The notion of "phases" doesn't exist anymore in the Ethereum roadmap. Also fix dead link to roadmap. Co-authored-by: Michael Sproul --- book/src/voluntary-exit.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/book/src/voluntary-exit.md b/book/src/voluntary-exit.md index 69c2d7598bb..5056040e4c6 100644 --- a/book/src/voluntary-exit.md +++ b/book/src/voluntary-exit.md @@ -12,10 +12,10 @@ This number can be much higher depending on how many other validators are queued ## Withdrawal of exited funds -Even though users can perform a voluntary exit in phase 0, they **cannot withdraw their exited funds at this point in time**. -This implies that the staked funds are effectively **frozen** until withdrawals are enabled in future phases. +Even though users can currently perform a voluntary exit, they **cannot withdraw their exited funds at this point in time**. 
+This implies that the staked funds are effectively **frozen** until withdrawals are enabled in a future hard fork (Capella). -To understand the phased rollout strategy for Ethereum upgrades, please visit . +To understand the rollout strategy for Ethereum upgrades, please visit . From 59a7a4703c04a9cffb4402bf2aa3bdb04462b2b5 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 20 Dec 2022 01:34:52 +0000 Subject: [PATCH 12/23] Various CI fixes (#3813) ## Issue Addressed Closes #3812 Closes #3750 Closes #3705 --- .github/workflows/local-testnet.yml | 2 +- .github/workflows/release.yml | 27 +++++++++++----------- .github/workflows/test-suite.yml | 36 +++++++++++++++-------------- testing/ef_tests/Makefile | 6 +++-- testing/web3signer_tests/build.rs | 17 +++++++++++--- 5 files changed, 51 insertions(+), 37 deletions(-) diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml index b916ffee65a..a522f2efb99 100644 --- a/.github/workflows/local-testnet.yml +++ b/.github/workflows/local-testnet.yml @@ -21,7 +21,7 @@ jobs: - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc - uses: arduino/setup-protoc@v1 + uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install ganache diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 957d016dc65..8ca6ab0f923 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -8,8 +8,8 @@ on: env: DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} - REPO_NAME: sigp/lighthouse - IMAGE_NAME: sigp/lighthouse + REPO_NAME: ${{ github.repository_owner }}/lighthouse + IMAGE_NAME: ${{ github.repository_owner }}/lighthouse jobs: extract-version: @@ -63,12 +63,8 @@ jobs: steps: - name: Checkout sources uses: actions/checkout@v3 - - name: Build toolchain - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - 
profile: minimal - override: true + - name: Get latest version of stable Rust + run: rustup update stable # ============================== # Windows dependencies @@ -88,7 +84,7 @@ jobs: # ============================== - name: Install Protoc if: contains(matrix.arch, 'darwin') || contains(matrix.arch, 'windows') - uses: arduino/setup-protoc@v1 + uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} @@ -179,13 +175,13 @@ jobs: # ======================================================================= - name: Upload artifact - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz path: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz - name: Upload signature - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz.asc path: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz.asc @@ -208,7 +204,7 @@ jobs: # ============================== - name: Download artifacts - uses: actions/download-artifact@v2 + uses: actions/download-artifact@v3 # ============================== # Create release draft @@ -216,11 +212,14 @@ jobs: - name: Generate Full Changelog id: changelog - run: echo "CHANGELOG=$(git log --pretty=format:"- %s" $(git describe --tags --abbrev=0 ${{ env.VERSION }}^)..${{ env.VERSION }})" >> $GITHUB_OUTPUT + run: | + echo "CHANGELOG<> $GITHUB_OUTPUT + echo "$(git log --pretty=format:"- %s" $(git describe --tags --abbrev=0 ${{ env.VERSION }}^)..${{ env.VERSION }})" >> $GITHUB_OUTPUT + echo "EOF" >> $GITHUB_OUTPUT - name: Create Release Draft env: - GITHUB_USER: sigp + GITHUB_USER: ${{ github.repository_owner }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # The formatting here is borrowed from OpenEthereum: 
https://github.com/openethereum/openethereum/blob/main/.github/workflows/build.yml diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index d536869e457..d95ac61167e 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -13,6 +13,8 @@ env: RUSTFLAGS: "-D warnings" # The Nightly version used for cargo-udeps, might need updating from time to time. PINNED_NIGHTLY: nightly-2022-05-20 + # Prevent Github API rate limiting. + LIGHTHOUSE_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} jobs: target-branch-check: name: target-branch-check @@ -51,7 +53,7 @@ jobs: - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc - uses: arduino/setup-protoc@v1 + uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install ganache @@ -95,7 +97,7 @@ jobs: - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc - uses: arduino/setup-protoc@v1 + uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run beacon_chain tests for all known forks @@ -109,7 +111,7 @@ jobs: - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc - uses: arduino/setup-protoc@v1 + uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run operation_pool tests for all known forks @@ -133,7 +135,7 @@ jobs: - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc - uses: arduino/setup-protoc@v1 + uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install ganache @@ -149,7 +151,7 @@ jobs: - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc - uses: arduino/setup-protoc@v1 + uses: 
arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run state_transition_vectors in release. @@ -163,7 +165,7 @@ jobs: - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc - uses: arduino/setup-protoc@v1 + uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run consensus-spec-tests with blst, milagro and fake_crypto @@ -189,7 +191,7 @@ jobs: - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc - uses: arduino/setup-protoc@v1 + uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install ganache @@ -205,7 +207,7 @@ jobs: - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc - uses: arduino/setup-protoc@v1 + uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install ganache @@ -221,7 +223,7 @@ jobs: - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc - uses: arduino/setup-protoc@v1 + uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install ganache @@ -237,7 +239,7 @@ jobs: - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc - uses: arduino/setup-protoc@v1 + uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install ganache @@ -253,7 +255,7 @@ jobs: - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc - uses: arduino/setup-protoc@v1 + uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Install ganache @@ -285,7 +287,7 @@ jobs: - name: Get latest version of stable 
Rust run: rustup update stable - name: Install Protoc - uses: arduino/setup-protoc@v1 + uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run exec engine integration tests in release @@ -299,7 +301,7 @@ jobs: - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc - uses: arduino/setup-protoc@v1 + uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Typecheck benchmark code without running it @@ -323,7 +325,7 @@ jobs: - name: Get latest version of stable Rust run: rustup update stable - name: Install Protoc - uses: arduino/setup-protoc@v1 + uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Lint code for quality and style with Clippy @@ -346,7 +348,7 @@ jobs: cargo build --release --bin cargo-clippy --bin clippy-driver cargo build --release --bin cargo-clippy --bin clippy-driver -Zunstable-options --out-dir $(rustc --print=sysroot)/bin - name: Install Protoc - uses: arduino/setup-protoc@v1 + uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run Clippy with the disallowed-from-async lint @@ -360,7 +362,7 @@ jobs: - name: Install Rust @ MSRV (${{ needs.extract-msrv.outputs.MSRV }}) run: rustup override set ${{ needs.extract-msrv.outputs.MSRV }} - name: Install Protoc - uses: arduino/setup-protoc@v1 + uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Run cargo check @@ -404,7 +406,7 @@ jobs: # NOTE: cargo-udeps version is pinned until this issue is resolved: # https://github.com/est31/cargo-udeps/issues/135 - name: Install Protoc - uses: arduino/setup-protoc@v1 + uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: 
Install cargo-udeps diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index e05ef0b06b1..b2af490dd0e 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -13,6 +13,8 @@ BLS_TARBALL = $(patsubst %,%-$(BLS_TEST_TAG).tar.gz,$(BLS_TEST)) BLS_OUTPUT_DIR := $(OUTPUT_DIR)/$(BLS_TEST_REPO_NAME) BLS_BASE_URL := https://github.com/ethereum/$(BLS_TEST_REPO_NAME)/releases/download/$(BLS_TEST_TAG) +WGET := $(if $(LIGHTHOUSE_GITHUB_TOKEN),wget --header="Authorization: $(LIGHTHOUSE_GITHUB_TOKEN)",wget) + all: make $(OUTPUT_DIR) make $(BLS_OUTPUT_DIR) @@ -25,11 +27,11 @@ $(OUTPUT_DIR): $(TARBALLS) $(BLS_OUTPUT_DIR): mkdir $(BLS_OUTPUT_DIR) - wget $(BLS_BASE_URL)/$(BLS_TEST).tar.gz -O $(BLS_TARBALL) + $(WGET) $(BLS_BASE_URL)/$(BLS_TEST).tar.gz -O $(BLS_TARBALL) tar -xzf $(BLS_TARBALL) -C $(BLS_OUTPUT_DIR) %-$(TESTS_TAG).tar.gz: - wget $(BASE_URL)/$*.tar.gz -O $@ + $(WGET) $(BASE_URL)/$*.tar.gz -O $@ clean-test-files: rm -rf $(OUTPUT_DIR) $(BLS_OUTPUT_DIR) diff --git a/testing/web3signer_tests/build.rs b/testing/web3signer_tests/build.rs index f62dff0b6fe..a55c39376a4 100644 --- a/testing/web3signer_tests/build.rs +++ b/testing/web3signer_tests/build.rs @@ -1,7 +1,10 @@ //! This build script downloads the latest Web3Signer release and places it in the `OUT_DIR` so it //! can be used for integration testing. -use reqwest::Client; +use reqwest::{ + header::{self, HeaderValue}, + Client, +}; use serde_json::Value; use std::env; use std::fs; @@ -15,10 +18,15 @@ const FIXED_VERSION_STRING: Option<&str> = None; #[tokio::main] async fn main() { let out_dir = env::var("OUT_DIR").unwrap(); - download_binary(out_dir.into()).await; + + // Read a Github API token from the environment. This is intended to prevent rate-limits on CI. + // We use a name that is unlikely to accidentally collide with anything the user has configured. 
+ let github_token = env::var("LIGHTHOUSE_GITHUB_TOKEN"); + + download_binary(out_dir.into(), github_token.as_deref().unwrap_or("")).await; } -pub async fn download_binary(dest_dir: PathBuf) { +pub async fn download_binary(dest_dir: PathBuf, github_token: &str) { let version_file = dest_dir.join("version"); let client = Client::builder() @@ -33,8 +41,11 @@ pub async fn download_binary(dest_dir: PathBuf) { env_version } else { // Get the latest release of the web3 signer repo. + let mut token_header_value = HeaderValue::from_str(github_token).unwrap(); + token_header_value.set_sensitive(true); let latest_response: Value = client .get("https://api.github.com/repos/ConsenSys/web3signer/releases/latest") + .header(header::AUTHORIZATION, token_header_value) .send() .await .unwrap() From 4e5e7ee1fcbb60b9a36260da150ad8215c0d37ba Mon Sep 17 00:00:00 2001 From: Age Manning Date: Thu, 5 Jan 2023 17:18:24 +0000 Subject: [PATCH 13/23] Restructure code for libp2p upgrade (#3850) Our custom RPC implementation is lagging from the libp2p v50 version. We are going to need to change a bunch of function names and would be nice to have consistent ordering of function names inside the handlers. This is a precursor to the libp2p upgrade to minimize merge conflicts in function ordering. 
--- .../lighthouse_network/src/rpc/handler.rs | 113 +++++++++--------- 1 file changed, 58 insertions(+), 55 deletions(-) diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index 9d6229eb382..a1743c15fb6 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -327,61 +327,6 @@ where self.listen_protocol.clone() } - fn inject_fully_negotiated_inbound( - &mut self, - substream: >::Output, - _info: Self::InboundOpenInfo, - ) { - // only accept new peer requests when active - if !matches!(self.state, HandlerState::Active) { - return; - } - - let (req, substream) = substream; - let expected_responses = req.expected_responses(); - - // store requests that expect responses - if expected_responses > 0 { - if self.inbound_substreams.len() < MAX_INBOUND_SUBSTREAMS { - // Store the stream and tag the output. - let delay_key = self.inbound_substreams_delay.insert( - self.current_inbound_substream_id, - Duration::from_secs(RESPONSE_TIMEOUT), - ); - let awaiting_stream = InboundState::Idle(substream); - self.inbound_substreams.insert( - self.current_inbound_substream_id, - InboundInfo { - state: awaiting_stream, - pending_items: VecDeque::with_capacity(expected_responses as usize), - delay_key: Some(delay_key), - protocol: req.protocol(), - request_start_time: Instant::now(), - remaining_chunks: expected_responses, - }, - ); - } else { - self.events_out.push(Err(HandlerErr::Inbound { - id: self.current_inbound_substream_id, - proto: req.protocol(), - error: RPCError::HandlerRejected, - })); - return self.shutdown(None); - } - } - - // If we received a goodbye, shutdown the connection. 
- if let InboundRequest::Goodbye(_) = req { - self.shutdown(None); - } - - self.events_out.push(Ok(RPCReceived::Request( - self.current_inbound_substream_id, - req, - ))); - self.current_inbound_substream_id.0 += 1; - } - fn inject_fully_negotiated_outbound( &mut self, out: >::Output, @@ -438,6 +383,64 @@ where } } + fn inject_fully_negotiated_inbound( + &mut self, + substream: >::Output, + _info: Self::InboundOpenInfo, + ) { + // only accept new peer requests when active + if !matches!(self.state, HandlerState::Active) { + return; + } + + let (req, substream) = substream; + let expected_responses = req.expected_responses(); + + // store requests that expect responses + if expected_responses > 0 { + if self.inbound_substreams.len() < MAX_INBOUND_SUBSTREAMS { + // Store the stream and tag the output. + let delay_key = self.inbound_substreams_delay.insert( + self.current_inbound_substream_id, + Duration::from_secs(RESPONSE_TIMEOUT), + ); + let awaiting_stream = InboundState::Idle(substream); + self.inbound_substreams.insert( + self.current_inbound_substream_id, + InboundInfo { + state: awaiting_stream, + pending_items: VecDeque::with_capacity(std::cmp::min( + expected_responses, + 128, + ) as usize), + delay_key: Some(delay_key), + protocol: req.protocol(), + request_start_time: Instant::now(), + remaining_chunks: expected_responses, + }, + ); + } else { + self.events_out.push(Err(HandlerErr::Inbound { + id: self.current_inbound_substream_id, + proto: req.protocol(), + error: RPCError::HandlerRejected, + })); + return self.shutdown(None); + } + } + + // If we received a goodbye, shutdown the connection. 
+ if let InboundRequest::Goodbye(_) = req { + self.shutdown(None); + } + + self.events_out.push(Ok(RPCReceived::Request( + self.current_inbound_substream_id, + req, + ))); + self.current_inbound_substream_id.0 += 1; + } + fn inject_event(&mut self, rpc_event: Self::InEvent) { match rpc_event { RPCSend::Request(id, req) => self.send_request(id, req), From 1d9a2022b41ea62ef91cc8b8ba8fd44d28e7bbff Mon Sep 17 00:00:00 2001 From: Age Manning Date: Fri, 6 Jan 2023 15:59:33 +0000 Subject: [PATCH 14/23] Upgrade to libp2p v0.50.0 (#3764) I've needed to do this work in order to do some episub testing. This version of libp2p has not yet been released, so this is left as a draft for when we wish to update. Co-authored-by: Diva M --- .github/workflows/test-suite.yml | 23 +- Cargo.lock | 2151 ++++++++++++++--- beacon_node/http_api/tests/common.rs | 20 +- beacon_node/lighthouse_network/Cargo.toml | 5 +- .../lighthouse_network/src/discovery/mod.rs | 87 +- .../src/peer_manager/mod.rs | 2 +- .../src/peer_manager/network_behaviour.rs | 127 +- .../src/peer_manager/peerdb/client.rs | 2 +- .../src/service/behaviour.rs | 4 +- .../lighthouse_network/src/service/mod.rs | 20 +- .../lighthouse_network/src/service/utils.rs | 3 +- .../tests/{common/mod.rs => common.rs} | 7 - .../tests/common/behaviour.rs | 395 --- .../lighthouse_network/tests/common/swarm.rs | 99 - .../lighthouse_network/tests/pm_tests.rs | 203 -- bors.toml | 1 - 16 files changed, 1961 insertions(+), 1188 deletions(-) rename beacon_node/lighthouse_network/tests/{common/mod.rs => common.rs} (98%) delete mode 100644 beacon_node/lighthouse_network/tests/common/behaviour.rs delete mode 100644 beacon_node/lighthouse_network/tests/common/swarm.rs delete mode 100644 beacon_node/lighthouse_network/tests/pm_tests.rs diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index d95ac61167e..8d52f7fa7e2 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -12,7 +12,7 @@ env: # 
Deny warnings in CI RUSTFLAGS: "-D warnings" # The Nightly version used for cargo-udeps, might need updating from time to time. - PINNED_NIGHTLY: nightly-2022-05-20 + PINNED_NIGHTLY: nightly-2022-12-15 # Prevent Github API rate limiting. LIGHTHOUSE_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} jobs: @@ -332,27 +332,6 @@ jobs: run: make lint - name: Certify Cargo.lock freshness run: git diff --exit-code Cargo.lock - disallowed-from-async-lint: - name: disallowed-from-async-lint - runs-on: ubuntu-latest - needs: cargo-fmt - continue-on-error: true - steps: - - uses: actions/checkout@v3 - - name: Install SigP Clippy fork - run: | - cd .. - git clone https://github.com/michaelsproul/rust-clippy.git - cd rust-clippy - git checkout 31a49666ccfcd7963b63345d6ce757c373f22c2a - cargo build --release --bin cargo-clippy --bin clippy-driver - cargo build --release --bin cargo-clippy --bin clippy-driver -Zunstable-options --out-dir $(rustc --print=sysroot)/bin - - name: Install Protoc - uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - - name: Run Clippy with the disallowed-from-async lint - run: make nightly-lint check-msrv: name: check-msrv runs-on: ubuntu-latest diff --git a/Cargo.lock b/Cargo.lock index 2f7cd873dd7..cc1ad52477e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -50,9 +50,9 @@ dependencies = [ [[package]] name = "addr2line" -version = "0.17.0" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9ecd88a8c8378ca913a680cd98f0f13ac67383d35993f86c90a70e3f137816b" +checksum = "a76fd60b23679b7d19bd066031410fb7e458ccc5e958eb5c325888ce4baedc97" dependencies = [ "gimli", ] @@ -69,6 +69,15 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" +[[package]] +name = "aead" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "7fc95d1bdb8e6666b2b217308eeeb09f2d6728d104be3e31916cc74d15420331" +dependencies = [ + "generic-array", +] + [[package]] name = "aead" version = "0.4.3" @@ -76,6 +85,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b613b8e1e3cf911a086f53f03bf286f52fd7a7258e4fa606f0ef220d39d8877" dependencies = [ "generic-array", + "rand_core 0.6.4", +] + +[[package]] +name = "aes" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "884391ef1066acaa41e766ba8f596341b96e93ce34f9a43e7d24bf0a0eaf0561" +dependencies = [ + "aes-soft", + "aesni", + "cipher 0.2.5", ] [[package]] @@ -85,26 +106,60 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e8b47f52ea9bae42228d07ec09eb676433d7c4ed1ebdf0f1d1c29ed446f1ab8" dependencies = [ "cfg-if", - "cipher", + "cipher 0.3.0", "cpufeatures", - "ctr", + "ctr 0.8.0", "opaque-debug", ] +[[package]] +name = "aes-gcm" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5278b5fabbb9bd46e24aa69b2fdea62c99088e0a950a9be40e3e0101298f88da" +dependencies = [ + "aead 0.3.2", + "aes 0.6.0", + "cipher 0.2.5", + "ctr 0.6.0", + "ghash 0.3.1", + "subtle", +] + [[package]] name = "aes-gcm" version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df5f85a83a7d8b0442b6aa7b504b8212c1733da07b98aae43d4bc21b2cb3cdf6" dependencies = [ - "aead", - "aes", - "cipher", - "ctr", - "ghash", + "aead 0.4.3", + "aes 0.7.5", + "cipher 0.3.0", + "ctr 0.8.0", + "ghash 0.4.4", "subtle", ] +[[package]] +name = "aes-soft" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be14c7498ea50828a38d0e24a765ed2effe92a705885b57d029cd67d45744072" +dependencies = [ + "cipher 0.2.5", + "opaque-debug", +] + +[[package]] +name = "aesni" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ea2e11f5e94c2f7d386164cc2aa1f97823fed6f259e486940a71c174dd01b0ce" +dependencies = [ + "cipher 0.2.5", + "opaque-debug", +] + [[package]] name = "ahash" version = "0.7.6" @@ -118,9 +173,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "0.7.19" +version = "0.7.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4f55bd91a0978cbfd91c457a164bab8b4001c833b7f323132c0a4e1922dd44e" +checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac" dependencies = [ "memchr", ] @@ -181,12 +236,109 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" +[[package]] +name = "asn1-rs" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30ff05a702273012438132f449575dbc804e27b2f3cbe3069aa237d26c98fa33" +dependencies = [ + "asn1-rs-derive 0.1.0", + "asn1-rs-impl", + "displaydoc", + "nom 7.1.1", + "num-traits", + "rusticata-macros", + "thiserror", + "time 0.3.17", +] + +[[package]] +name = "asn1-rs" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf6690c370453db30743b373a60ba498fc0d6d83b11f4abfd87a84a075db5dd4" +dependencies = [ + "asn1-rs-derive 0.4.0", + "asn1-rs-impl", + "displaydoc", + "nom 7.1.1", + "num-traits", + "rusticata-macros", + "thiserror", + "time 0.3.17", +] + +[[package]] +name = "asn1-rs-derive" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db8b7511298d5b7784b40b092d9e9dcd3a627a5707e4b5e507931ab0d44eeebf" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", +] + +[[package]] +name = "asn1-rs-derive" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "726535892e8eae7e70657b4c8ea93d26b8553afb1ce617caee529ef96d7dee6c" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", 
+] + +[[package]] +name = "asn1-rs-impl" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2777730b2039ac0f95f093556e61b6d26cebed5393ca6f152717777cec3a42ed" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "asn1_der" version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e22d1f4b888c298a027c99dc9048015fac177587de20fc30232a057dfbe24a21" +[[package]] +name = "async-io" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c374dda1ed3e7d8f0d9ba58715f924862c63eae6849c92d3a18e7fbde9e2794" +dependencies = [ + "async-lock", + "autocfg 1.1.0", + "concurrent-queue", + "futures-lite", + "libc", + "log", + "parking", + "polling", + "slab", + "socket2", + "waker-fn", + "windows-sys 0.42.0", +] + +[[package]] +name = "async-lock" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8101efe8695a6c17e02911402145357e718ac92d3ff88ae8419e84b1707b685" +dependencies = [ + "event-listener", + "futures-lite", +] + [[package]] name = "async-stream" version = "0.3.3" @@ -210,9 +362,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.58" +version = "0.1.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e805d94e6b5001b651426cf4cd446b1ab5f319d27bab5c644f61de0a804360c" +checksum = "31e6e93155431f3931513b243d371981bb2770112b370c82745a1d19d2f99364" dependencies = [ "proc-macro2", "quote", @@ -232,9 +384,9 @@ dependencies = [ [[package]] name = "asynchronous-codec" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0de5164e5edbf51c45fb8c2d9664ae1c095cce1b265ecf7569093c0d66ef690" +checksum = "06a0daa378f5fd10634e44b0a29b2a87b890657658e072a30d6f26e57ddee182" dependencies = [ "bytes", "futures-sink", @@ -243,6 +395,12 @@ dependencies = [ "pin-project-lite 0.2.9", ] 
+[[package]] +name = "atomic-waker" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "065374052e7df7ee4047b1160cca5e1467a12351a40b3da123c870ba0b8eda2a" + [[package]] name = "attohttpc" version = "0.10.1" @@ -341,9 +499,9 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.66" +version = "0.3.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cab84319d616cfb654d03394f38ab7e6f0919e181b1b57e1fd15e7fb4077d9a7" +checksum = "233d376d6d185f2a3093e58f283f60f880315b6c60075b01f36b3b85154564ca" dependencies = [ "addr2line", "cc", @@ -354,6 +512,12 @@ dependencies = [ "rustc-demangle", ] +[[package]] +name = "base-x" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cbbc9d0964165b47557570cce6c952866c2678457aca742aafc9fb771d30270" + [[package]] name = "base16ct" version = "0.1.1" @@ -415,7 +579,7 @@ dependencies = [ "lazy_static", "lighthouse_metrics", "logging", - "lru", + "lru 0.7.8", "maplit", "merkle_proof", "oneshot_broadcast", @@ -535,16 +699,16 @@ dependencies = [ "funty 2.0.0", "radium 0.7.0", "tap", - "wyz 0.5.0", + "wyz 0.5.1", ] [[package]] name = "blake2" -version = "0.10.4" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9cf849ee05b2ee5fba5e36f97ff8ec2533916700fc0758d40d92136a42f3388" +checksum = "b12e5fd123190ce1c2e559308a94c9bacad77907d4c6005d9e58fe1a0689e55e" dependencies = [ - "digest 0.10.5", + "digest 0.10.6", ] [[package]] @@ -566,6 +730,16 @@ dependencies = [ "generic-array", ] +[[package]] +name = "block-modes" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57a0e8073e8baa88212fb5823574c02ebccb395136ba9a164ab89379ec6072f0" +dependencies = [ + "block-padding", + "cipher 0.2.5", +] + [[package]] name = "block-padding" version = "0.2.1" @@ -630,6 +804,51 @@ dependencies = [ "types", ] +[[package]] +name = "borsh" 
+version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15bf3650200d8bffa99015595e10f1fbd17de07abbc25bb067da79e769939bfa" +dependencies = [ + "borsh-derive", + "hashbrown 0.11.2", +] + +[[package]] +name = "borsh-derive" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6441c552f230375d18e3cc377677914d2ca2b0d36e52129fe15450a2dce46775" +dependencies = [ + "borsh-derive-internal", + "borsh-schema-derive-internal", + "proc-macro-crate 0.1.5", + "proc-macro2", + "syn", +] + +[[package]] +name = "borsh-derive-internal" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5449c28a7b352f2d1e592a8a28bf139bc71afb0764a14f3c02500935d8c44065" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "borsh-schema-derive-internal" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdbd5696d8bfa21d53d9fe39a714a18538bad11492a42d066dbbc395fb1951c0" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "bs58" version = "0.4.0" @@ -681,6 +900,27 @@ version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" +[[package]] +name = "bytecheck" +version = "0.6.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d11cac2c12b5adc6570dad2ee1b87eff4955dac476fe12d81e5fdd352e52406f" +dependencies = [ + "bytecheck_derive", + "ptr_meta", +] + +[[package]] +name = "bytecheck_derive" +version = "0.6.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13e576ebe98e605500b3c8041bb888e966653577172df6dd97398714eb30b9bf" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "byteorder" version = "1.4.3" @@ -689,9 +929,9 @@ checksum = 
"14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db" +checksum = "dfb24e866b15a1af2a1b663f10c6b6b8f397a84aadb828f12e5b289ec23a3a3c" dependencies = [ "serde", ] @@ -740,9 +980,20 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.74" +version = "1.0.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "581f5dba903aac52ea3feb5ec4810848460ee833876f1f9b0fdeab1f19091574" +checksum = "a20104e2335ce8a659d6dd92a51a767a0c062599c73b343fd152cb401e828c3d" + +[[package]] +name = "ccm" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aca1a8fbc20b50ac9673ff014abfb2b5f4085ee1a850d408f14a159c5853ac7" +dependencies = [ + "aead 0.3.2", + "cipher 0.2.5", + "subtle", +] [[package]] name = "cexpr" @@ -766,7 +1017,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c80e5460aa66fe3b91d40bcbdab953a597b60053e34d684ac6903f863b680a6" dependencies = [ "cfg-if", - "cipher", + "cipher 0.3.0", "cpufeatures", "zeroize", ] @@ -777,28 +1028,37 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a18446b09be63d457bbec447509e85f662f32952b035ce892290396bc0b0cff5" dependencies = [ - "aead", + "aead 0.4.3", "chacha20", - "cipher", + "cipher 0.3.0", "poly1305", "zeroize", ] [[package]] name = "chrono" -version = "0.4.22" +version = "0.4.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfd4d1b31faaa3a89d7934dbded3111da0d2ef28e3ebccdb4f0179f5929d1ef1" +checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f" dependencies = [ "iana-time-zone", "js-sys", "num-integer", "num-traits", - "time 0.1.44", + "time 0.1.45", 
"wasm-bindgen", "winapi", ] +[[package]] +name = "cipher" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12f8e7987cbd042a63249497f41aed09f8e65add917ea6566effbc56578d6801" +dependencies = [ + "generic-array", +] + [[package]] name = "cipher" version = "0.3.0" @@ -882,7 +1142,7 @@ dependencies = [ "slot_clock", "store", "task_executor", - "time 0.3.16", + "time 0.3.17", "timer", "tokio", "types", @@ -922,6 +1182,15 @@ dependencies = [ "syn", ] +[[package]] +name = "concurrent-queue" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd7bef69dc86e3c610e4e7aed41035e2a7ed12e72dd7530f61327a6579a4390b" +dependencies = [ + "crossbeam-utils", +] + [[package]] name = "console_error_panic_hook" version = "0.1.7" @@ -934,9 +1203,9 @@ dependencies = [ [[package]] name = "const-oid" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "722e23542a15cea1f65d4a1419c4cfd7a26706c70871a13a04238ca3f40f1661" +checksum = "cec318a675afcb6a1ea1d4340e2d377e56e47c266f28043ceccbf4412ddfdd3b" [[package]] name = "convert_case" @@ -978,6 +1247,27 @@ dependencies = [ "libc", ] +[[package]] +name = "cpuid-bool" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcb25d077389e53838a8158c8e99174c5a9d902dee4904320db714f3c653ffba" + +[[package]] +name = "crc" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53757d12b596c16c78b83458d732a5d1a17ab3f53f2f7412f6fb57cc8a140ab3" +dependencies = [ + "crc-catalog", +] + +[[package]] +name = "crc-catalog" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d0165d2900ae6778e36e80bbc4da3b5eefccee9ba939761f9c2882a5d9af3ff" + [[package]] name = "crc32fast" version = "1.3.2" @@ -1046,22 +1336,22 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = 
"0.9.11" +version = "0.9.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f916dfc5d356b0ed9dae65f1db9fc9770aa2851d2662b988ccf4fe3516e86348" +checksum = "01a9af1f4c2ef74bb8aa1f7e19706bc72d03598c8a570bb5de72243c7a9d9d5a" dependencies = [ "autocfg 1.1.0", "cfg-if", "crossbeam-utils", - "memoffset", + "memoffset 0.7.1", "scopeguard", ] [[package]] name = "crossbeam-utils" -version = "0.8.12" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edbafec5fa1f196ca66527c1b12c2ec4745ca14b50f1ad8f9f6f720b55d11fac" +checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f" dependencies = [ "cfg-if", ] @@ -1104,6 +1394,16 @@ dependencies = [ "subtle", ] +[[package]] +name = "crypto-mac" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bff07008ec701e8028e2ceb8f83f0e4274ee62bd2dbdc4fefff2e9a91824081a" +dependencies = [ + "generic-array", + "subtle", +] + [[package]] name = "crypto-mac" version = "0.11.1" @@ -1136,23 +1436,32 @@ dependencies = [ "memchr", ] +[[package]] +name = "ctr" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb4a30d54f7443bf3d6191dcd486aca19e67cb3c49fa7a06a319966346707e7f" +dependencies = [ + "cipher 0.2.5", +] + [[package]] name = "ctr" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "049bb91fb4aaf0e3c7efa6cd5ef877dbbbd15b39dad06d9948de4ec8a75761ea" dependencies = [ - "cipher", + "cipher 0.3.0", ] [[package]] name = "ctrlc" -version = "3.2.3" +version = "3.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d91974fbbe88ec1df0c24a4f00f99583667a7e2e6272b2b92d294d81e462173" +checksum = "1631ca6e3c59112501a9d87fd86f21591ff77acd31331e8a73f8d80a65bbdd71" dependencies = [ - "nix 0.25.0", - "winapi", + "nix 0.26.1", + "windows-sys 0.42.0", ] [[package]] @@ -1170,22 +1479,23 @@ dependencies = [ 
[[package]] name = "curve25519-dalek" -version = "4.0.0-pre.1" +version = "4.0.0-pre.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4033478fbf70d6acf2655ac70da91ee65852d69daf7a67bf7a2f518fb47aafcf" +checksum = "67bc65846be335cb20f4e52d49a437b773a2c1fdb42b19fc84e79e6f6771536f" dependencies = [ - "byteorder", - "digest 0.9.0", - "rand_core 0.6.4", + "cfg-if", + "fiat-crypto", + "packed_simd_2", + "platforms 3.0.2", "subtle", "zeroize", ] [[package]] name = "cxx" -version = "1.0.80" +version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b7d4e43b25d3c994662706a1d4fcfc32aaa6afd287502c111b237093bb23f3a" +checksum = "bdf07d07d6531bfcdbe9b8b739b104610c6508dcc4d63b410585faf338241daf" dependencies = [ "cc", "cxxbridge-flags", @@ -1195,9 +1505,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.80" +version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84f8829ddc213e2c1368e51a2564c552b65a8cb6a28f31e576270ac81d5e5827" +checksum = "d2eb5b96ecdc99f72657332953d4d9c50135af1bac34277801cc3937906ebd39" dependencies = [ "cc", "codespan-reporting", @@ -1210,15 +1520,15 @@ dependencies = [ [[package]] name = "cxxbridge-flags" -version = "1.0.80" +version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e72537424b474af1460806647c41d4b6d35d09ef7fe031c5c2fa5766047cc56a" +checksum = "ac040a39517fd1674e0f32177648334b0f4074625b5588a64519804ba0553b12" [[package]] name = "cxxbridge-macro" -version = "1.0.80" +version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "309e4fb93eed90e1e14bea0da16b209f81813ba9fc7830c20ed151dd7bc0a4d7" +checksum = "1362b0ddcfc4eb0a1f57b68bd77dd99f0e826958a96abd0ae9bd092e114ffed6" dependencies = [ "proc-macro2", "quote", @@ -1231,8 +1541,18 @@ version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c" dependencies = [ - "darling_core", - "darling_macro", + "darling_core 0.13.4", + "darling_macro 0.13.4", +] + +[[package]] +name = "darling" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0dd3cd20dc6b5a876612a6e5accfe7f3dd883db6d07acfbf14c128f61550dfa" +dependencies = [ + "darling_core 0.14.2", + "darling_macro 0.14.2", ] [[package]] @@ -1249,13 +1569,38 @@ dependencies = [ "syn", ] +[[package]] +name = "darling_core" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a784d2ccaf7c98501746bf0be29b2022ba41fd62a2e622af997a03e9f972859f" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim 0.10.0", + "syn", +] + [[package]] name = "darling_macro" version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ - "darling_core", + "darling_core 0.13.4", + "quote", + "syn", +] + +[[package]] +name = "darling_macro" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7618812407e9402654622dd402b0a89dff9ba93badd6540781526117b92aab7e" +dependencies = [ + "darling_core 0.14.2", "quote", "syn", ] @@ -1282,13 +1627,33 @@ dependencies = [ [[package]] name = "data-encoding" -version = "2.3.2" +version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57" +checksum = "23d8666cb01533c39dde32bcbab8e227b4ed6679b2c925eba05feabea39508fb" [[package]] -name = "database_manager" -version = "0.1.0" +name = "data-encoding-macro" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86927b7cd2fe88fa698b87404b287ab98d1a0063a34071d92e575b72d3029aca" +dependencies = [ + "data-encoding", + 
"data-encoding-macro-internal", +] + +[[package]] +name = "data-encoding-macro-internal" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5bbed42daaa95e780b60a50546aa345b8413a1e46f9a40a12907d3598f038db" +dependencies = [ + "data-encoding", + "syn", +] + +[[package]] +name = "database_manager" +version = "0.1.0" dependencies = [ "beacon_chain", "beacon_node", @@ -1336,14 +1701,43 @@ dependencies = [ [[package]] name = "der" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13dd2ae565c0a381dde7fade45fce95984c568bdcb4700a4fdbe3175e0380b2f" +checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" dependencies = [ "const-oid", + "pem-rfc7468", "zeroize", ] +[[package]] +name = "der-parser" +version = "7.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe398ac75057914d7d07307bf67dc7f3f574a26783b4fc7805a20ffa9f506e82" +dependencies = [ + "asn1-rs 0.3.1", + "displaydoc", + "nom 7.1.1", + "num-bigint", + "num-traits", + "rusticata-macros", +] + +[[package]] +name = "der-parser" +version = "8.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42d4bc9b0db0a0df9ae64634ac5bdefb7afcb534e182275ca0beadbe486701c1" +dependencies = [ + "asn1-rs 0.5.1", + "displaydoc", + "nom 7.1.1", + "num-bigint", + "num-traits", + "rusticata-macros", +] + [[package]] name = "derivative" version = "2.2.0" @@ -1357,15 +1751,46 @@ dependencies = [ [[package]] name = "derive_arbitrary" -version = "1.2.0" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8a16495aeb28047bb1185fca837baf755e7d71ed3aeed7f8504654ffa927208" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "derive_builder" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d07adf7be193b71cc36b193d0f5fe60b918a3a9db4dad0449f57bcfd519704a3" +dependencies = [ + "derive_builder_macro", +] + +[[package]] +name = "derive_builder_core" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4903dff04948f22033ca30232ab8eca2c3fc4c913a8b6a34ee5199699814817f" +checksum = "1f91d4cfa921f1c05904dc3c57b4a32c38aed3340cce209f3a6fd1478babafc4" dependencies = [ + "darling 0.14.2", "proc-macro2", "quote", "syn", ] +[[package]] +name = "derive_builder_macro" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f0314b72bed045f3a68671b3c86328386762c93f82d98c65c3cb5e5f573dd68" +dependencies = [ + "derive_builder_core", + "syn", +] + [[package]] name = "derive_more" version = "0.99.17" @@ -1390,9 +1815,9 @@ dependencies = [ [[package]] name = "digest" -version = "0.10.5" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adfbc57365a37acbd2ebf2b64d7e69bb766e2fea813521ed536f5d0520dcf86c" +checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" dependencies = [ "block-buffer 0.10.3", "crypto-common", @@ -1455,8 +1880,8 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d767c0e59b3e8d65222d95df723cc2ea1da92bb0f27c563607e6f0bde064f255" dependencies = [ - "aes", - "aes-gcm", + "aes 0.7.5", + "aes-gcm 0.9.4", "arrayvec", "delay_map", "enr", @@ -1466,8 +1891,8 @@ dependencies = [ "hex", "hkdf", "lazy_static", - "libp2p-core", - "lru", + "libp2p-core 0.36.0", + "lru 0.7.8", "more-asserts", "parking_lot 0.11.2", "rand 0.8.5", @@ -1483,6 +1908,17 @@ dependencies = [ "zeroize", ] +[[package]] +name = "displaydoc" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3bf95dc3f046b9da4f2d51833c0d3547d8564ef6910f5c1ed130306a75b92886" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "dtoa" version = 
"1.0.4" @@ -1570,10 +2006,12 @@ dependencies = [ "base16ct", "crypto-bigint", "der", - "digest 0.10.5", + "digest 0.10.6", "ff", "generic-array", "group", + "hkdf", + "pem-rfc7468", "pkcs8", "rand_core 0.6.4", "sec1", @@ -1612,9 +2050,9 @@ dependencies = [ [[package]] name = "enum-as-inner" -version = "0.4.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21cdad81446a7f7dc43f6a77409efeb9733d2fa65553efef6018ef257c959b73" +checksum = "c9720bba047d567ffc8a3cba48bf19126600e249ab7f128e9233e6376976a116" dependencies = [ "heck", "proc-macro2", @@ -1634,9 +2072,9 @@ dependencies = [ [[package]] name = "env_logger" -version = "0.9.1" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c90bf5f19754d10198ccb95b70664fc925bd1fc090a0fd9a6ebc54acc8cd6272" +checksum = "a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7" dependencies = [ "atty", "humantime", @@ -1800,7 +2238,7 @@ dependencies = [ name = "eth2_keystore" version = "0.1.0" dependencies = [ - "aes", + "aes 0.7.5", "bls", "eth2_key_derivation", "hex", @@ -1814,7 +2252,7 @@ dependencies = [ "sha2 0.9.9", "tempfile", "unicode-normalization", - "uuid", + "uuid 0.8.2", "zeroize", ] @@ -1856,7 +2294,7 @@ dependencies = [ name = "eth2_ssz_derive" version = "0.3.0" dependencies = [ - "darling", + "darling 0.13.4", "proc-macro2", "quote", "syn", @@ -1892,7 +2330,7 @@ dependencies = [ "serde_repr", "tempfile", "tiny-bip39", - "uuid", + "uuid 0.8.2", ] [[package]] @@ -1973,7 +2411,7 @@ dependencies = [ "enr", "hex", "integer-sqrt", - "multiaddr", + "multiaddr 0.14.0", "rand 0.8.5", "serde", "serde_json", @@ -2076,6 +2514,12 @@ dependencies = [ "ws_stream_wasm", ] +[[package]] +name = "event-listener" +version = "2.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" + [[package]] name = "execution_engine_integration" version = 
"0.1.0" @@ -2120,7 +2564,7 @@ dependencies = [ "jsonwebtoken", "lazy_static", "lighthouse_metrics", - "lru", + "lru 0.7.8", "mev-build-rs", "parking_lot 0.12.1", "rand 0.8.5", @@ -2189,9 +2633,9 @@ dependencies = [ [[package]] name = "fastrlp-derive" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9e9158c1d8f0a7a716c9191562eaabba70268ba64972ef4871ce8d66fd08872" +checksum = "d6e454d03710df0cd95ce075d7731ce3fa35fb3779c15270cd491bc5f2ef9355" dependencies = [ "bytes", "proc-macro2", @@ -2215,13 +2659,19 @@ version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec54ac60a7f2ee9a97cad9946f9bf629a3bc6a7ae59e68983dc9318f5a54b81a" +[[package]] +name = "fiat-crypto" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a214f5bb88731d436478f3ae1f8a277b62124089ba9fb67f4f93fb100ef73c90" + [[package]] name = "field-offset" version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e1c54951450cbd39f3dbcf1005ac413b49487dabf18a720ad2383eccfeffb92" dependencies = [ - "memoffset", + "memoffset 0.6.5", "rustc_version 0.3.3", ] @@ -2253,9 +2703,9 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] name = "flate2" -version = "1.0.24" +version = "1.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f82b0f4c27ad9f8bfd1f3208d882da2b09c301bc1c828fd3a00d0216d2fbbff6" +checksum = "a8a2db397cb1c8772f31494cb8917e48cd1e64f0fa7efac59fbd741a0a8ce841" dependencies = [ "crc32fast", "libz-sys", @@ -2378,6 +2828,21 @@ version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "00f5fb52a06bdcadeb54e8d3671f8888a39697dcb0b81b23b55174030427f4eb" +[[package]] +name = "futures-lite" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"7694489acd39452c77daa48516b894c153f192c3578d5a839b62c58099fcbf48" +dependencies = [ + "fastrand", + "futures-core", + "futures-io", + "memchr", + "parking", + "pin-project-lite 0.2.9", + "waker-fn", +] + [[package]] name = "futures-macro" version = "0.3.25" @@ -2502,6 +2967,16 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "ghash" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97304e4cd182c3846f7575ced3890c53012ce534ad9114046b0a9e00bb30a375" +dependencies = [ + "opaque-debug", + "polyval 0.4.5", +] + [[package]] name = "ghash" version = "0.4.4" @@ -2509,14 +2984,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1583cc1656d7839fd3732b80cf4f38850336cdb9b8ded1cd399ca62958de3c99" dependencies = [ "opaque-debug", - "polyval", + "polyval 0.5.3", ] [[package]] name = "gimli" -version = "0.26.2" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22030e2c5a68ec659fde1e949a745124b48e6fa8b045b7ed5bd1fe4ccc5c4e5d" +checksum = "dec7af912d60cdbd3677c1af9352ebae6fb8394d165568a2234df0fa00f87793" [[package]] name = "git-version" @@ -2689,6 +3164,16 @@ dependencies = [ "digest 0.9.0", ] +[[package]] +name = "hmac" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1441c6b1e930e2817404b5046f1f989899143a12bf92de603b69f4e0aee1e15" +dependencies = [ + "crypto-mac 0.10.1", + "digest 0.9.0", +] + [[package]] name = "hmac" version = "0.11.0" @@ -2705,7 +3190,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" dependencies = [ - "digest 0.10.5", + "digest 0.10.6", ] [[package]] @@ -2777,7 +3262,7 @@ dependencies = [ "lighthouse_network", "lighthouse_version", "logging", - "lru", + "lru 0.7.8", "network", "parking_lot 0.12.1", "proto_array", @@ -2842,9 +3327,9 @@ checksum = 
"9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.22" +version = "0.14.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abfba89e19b959ca163c7752ba59d737c1ceea53a5d31a149c805446fc958064" +checksum = "034711faac9d2166cb1baf1a2fb0b60b1f277f8492fd72176c17f3515e1abd3c" dependencies = [ "bytes", "futures-channel", @@ -2866,9 +3351,9 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.23.0" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d87c48c02e0dc5e3b849a2041db3029fd066650f8f717c07bf8ed78ccb895cac" +checksum = "1788965e61b367cd03a62950836d5cd41560c3577d90e40e0819373194d1661c" dependencies = [ "http", "hyper", @@ -2972,6 +3457,25 @@ dependencies = [ "libc", ] +[[package]] +name = "if-watch" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba7abdbb86e485125dad06c2691e1e393bf3b08c7b743b43aa162a00fd39062e" +dependencies = [ + "async-io", + "core-foundation", + "fnv", + "futures", + "if-addrs 0.7.0", + "ipnet", + "log", + "rtnetlink", + "system-configuration", + "tokio", + "windows", +] + [[package]] name = "igd" version = "0.11.1" @@ -3034,9 +3538,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.9.1" +version = "1.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e" +checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" dependencies = [ "autocfg 1.1.0", "hashbrown 0.12.3", @@ -3072,23 +3576,42 @@ dependencies = [ "num-traits", ] +[[package]] +name = "interceptor" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ffaa4d24f546a18eaeee91f7b2c52e080e20e285e43cd7c27a527b4712cfdad" +dependencies = [ + "async-trait", + "bytes", + "log", + "rand 0.8.5", + "rtcp", + "rtp", + "thiserror", + 
"tokio", + "waitgroup", + "webrtc-srtp", + "webrtc-util", +] + [[package]] name = "ipconfig" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "723519edce41262b05d4143ceb95050e4c614f483e78e9fd9e39a8275a84ad98" +checksum = "bd302af1b90f2463a98fa5ad469fc212c8e3175a41c3068601bfa2727591c5be" dependencies = [ "socket2", "widestring 0.5.1", "winapi", - "winreg 0.7.0", + "winreg", ] [[package]] name = "ipnet" -version = "2.5.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "879d54834c8c76457ef4293a689b2a8c59b076067ad77b15efafbb05f92a592b" +checksum = "11b0d96e660696543b251e58030cf9787df56da39dab19ad60eae7353040917e" [[package]] name = "itertools" @@ -3137,9 +3660,9 @@ dependencies = [ [[package]] name = "jsonwebtoken" -version = "8.1.1" +version = "8.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1aa4b4af834c6cfd35d8763d359661b90f2e45d8f750a0849156c7f4671af09c" +checksum = "09f4f04699947111ec1733e71778d763555737579e44b85844cae8e1940a1828" dependencies = [ "base64", "pem", @@ -3164,9 +3687,12 @@ dependencies = [ [[package]] name = "keccak" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9b7d56ba4a8344d6be9729995e6b06f928af29998cdf79fe390cbf6b1fee838" +checksum = "3afef3b6eff9ce9d8ff9b3601125eec7f0c8cbac7abd14f355d053fa56c98768" +dependencies = [ + "cpufeatures", +] [[package]] name = "lazy_static" @@ -3194,7 +3720,7 @@ dependencies = [ "clap_utils", "deposit_contract", "directory", - "env_logger 0.9.1", + "env_logger 0.9.3", "environment", "eth1_test_rig", "eth2", @@ -3244,9 +3770,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.137" +version = "0.2.138" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc7fcc620a3bff7cdd7a365be3376c97191aeaccc2a603e600951e452615bf89" +checksum = 
"db6d7e329c562c5dfab7a46a2afabc8b987ab9a4834c9d1ca04dc54c1546cef8" [[package]] name = "libflate" @@ -3270,9 +3796,9 @@ dependencies = [ [[package]] name = "libloading" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efbc0f03f9a775e9f6aed295c6a1ba2253c5757a9e03d55c6caa46a681abcddd" +checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" dependencies = [ "cfg-if", "winapi", @@ -3280,9 +3806,15 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.5" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fc7aa29613bd6a620df431842069224d8bc9011086b1db4c0e0cd47fa03ec9a" + +[[package]] +name = "libm" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "292a948cd991e376cf75541fe5b97a1081d713c618b4f1b9500f8844e49eb565" +checksum = "348108ab3fba42ec82ff6e9564fc4ca0247bdccdc68dd8af9764bbc79c3c8ffb" [[package]] name = "libmdbx" @@ -3301,33 +3833,33 @@ dependencies = [ [[package]] name = "libp2p" -version = "0.48.0" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94c996fe5bfdba47f5a5af71d48ecbe8cec900b7b97391cc1d3ba1afb0e2d3b6" +checksum = "2e0a0d2f693675f49ded13c5d510c48b78069e23cbd9108d7ccd59f6dc568819" dependencies = [ "bytes", "futures", "futures-timer", "getrandom 0.2.8", "instant", - "lazy_static", - "libp2p-core", + "libp2p-core 0.38.0", "libp2p-dns", "libp2p-gossipsub", "libp2p-identify", + "libp2p-mdns", "libp2p-metrics", "libp2p-mplex", "libp2p-noise", "libp2p-plaintext", + "libp2p-quic", "libp2p-swarm", - "libp2p-swarm-derive", "libp2p-tcp", + "libp2p-webrtc", "libp2p-websocket", "libp2p-yamux", - "multiaddr", + "multiaddr 0.16.0", "parking_lot 0.12.1", "pin-project", - "rand 0.7.3", "smallvec", ] @@ -3348,9 +3880,44 @@ dependencies = [ "lazy_static", "libsecp256k1", "log", - "multiaddr", + "multiaddr 0.14.0", + "multihash", + 
"multistream-select 0.11.0", + "p256", + "parking_lot 0.12.1", + "pin-project", + "prost", + "prost-build", + "rand 0.8.5", + "rw-stream-sink", + "sha2 0.10.6", + "smallvec", + "thiserror", + "unsigned-varint 0.7.1", + "void", + "zeroize", +] + +[[package]] +name = "libp2p-core" +version = "0.38.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6a8fcd392ff67af6cc3f03b1426c41f7f26b6b9aff2dc632c1c56dd649e571f" +dependencies = [ + "asn1_der", + "bs58", + "ed25519-dalek", + "either", + "fnv", + "futures", + "futures-timer", + "instant", + "libsecp256k1", + "log", + "multiaddr 0.16.0", "multihash", - "multistream-select", + "multistream-select 0.12.1", + "once_cell", "p256", "parking_lot 0.12.1", "pin-project", @@ -3358,6 +3925,7 @@ dependencies = [ "prost-build", "rand 0.8.5", "rw-stream-sink", + "sec1", "sha2 0.10.6", "smallvec", "thiserror", @@ -3368,12 +3936,12 @@ dependencies = [ [[package]] name = "libp2p-dns" -version = "0.36.0" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6cb3c16e3bb2f76c751ae12f0f26e788c89d353babdded40411e7923f01fc978" +checksum = "8e42a271c1b49f789b92f7fc87749fa79ce5c7bdc88cbdfacb818a4bca47fec5" dependencies = [ "futures", - "libp2p-core", + "libp2p-core 0.38.0", "log", "parking_lot 0.12.1", "smallvec", @@ -3382,9 +3950,9 @@ dependencies = [ [[package]] name = "libp2p-gossipsub" -version = "0.41.0" +version = "0.43.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2185aac44b162c95180ae4ddd1f4dfb705217ea1cb8e16bdfc70d31496fd80fa" +checksum = "a173171c71c29bb156f98886c7c4824596de3903dadf01e2e79d2ccdcf38cd9f" dependencies = [ "asynchronous-codec", "base64", @@ -3394,33 +3962,35 @@ dependencies = [ "futures", "hex_fmt", "instant", - "libp2p-core", + "libp2p-core 0.38.0", "libp2p-swarm", "log", "prometheus-client", "prost", "prost-build", - "rand 0.7.3", + "prost-codec", + "rand 0.8.5", "regex", "sha2 0.10.6", "smallvec", + 
"thiserror", "unsigned-varint 0.7.1", "wasm-timer", ] [[package]] name = "libp2p-identify" -version = "0.39.0" +version = "0.41.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f19440c84b509d69b13f0c9c28caa9bd3a059d25478527e937e86761f25c821e" +checksum = "647d6a99f8d5b7366ee6bcc608ec186e2fb58b497cf914c8409b803bd0f594a2" dependencies = [ "asynchronous-codec", "futures", "futures-timer", - "libp2p-core", + "libp2p-core 0.38.0", "libp2p-swarm", "log", - "lru", + "lru 0.8.1", "prost", "prost-build", "prost-codec", @@ -3429,13 +3999,33 @@ dependencies = [ "void", ] +[[package]] +name = "libp2p-mdns" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04f378264aade9872d6ccd315c0accc18be3a35d15fc1b9c36e5b6f983b62b5b" +dependencies = [ + "data-encoding", + "futures", + "if-watch", + "libp2p-core 0.38.0", + "libp2p-swarm", + "log", + "rand 0.8.5", + "smallvec", + "socket2", + "tokio", + "trust-dns-proto", + "void", +] + [[package]] name = "libp2p-metrics" -version = "0.9.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a74ab339e8b5d989e8c1000a78adb5c064a6319245bb22d1e70b415ec18c39b8" +checksum = "5ad8a64f29da86005c86a4d2728b8a0719e9b192f4092b609fd8790acb9dec55" dependencies = [ - "libp2p-core", + "libp2p-core 0.38.0", "libp2p-gossipsub", "libp2p-identify", "libp2p-swarm", @@ -3444,54 +4034,55 @@ dependencies = [ [[package]] name = "libp2p-mplex" -version = "0.36.0" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce53169351226ee0eb18ee7bef8d38f308fa8ad7244f986ae776390c0ae8a44d" +checksum = "03805b44107aa013e7cbbfa5627b31c36cbedfdfb00603c0311998882bc4bace" dependencies = [ "asynchronous-codec", "bytes", "futures", - "libp2p-core", + "libp2p-core 0.38.0", "log", "nohash-hasher", "parking_lot 0.12.1", - "rand 0.7.3", + "rand 0.8.5", "smallvec", "unsigned-varint 0.7.1", ] [[package]] name = 
"libp2p-noise" -version = "0.39.0" +version = "0.41.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cb0f939a444b06779ce551b3d78ebf13970ac27906ada452fd70abd160b09b8" +checksum = "a978cb57efe82e892ec6f348a536bfbd9fee677adbe5689d7a93ad3a9bffbf2e" dependencies = [ "bytes", "curve25519-dalek 3.2.0", "futures", - "lazy_static", - "libp2p-core", + "libp2p-core 0.38.0", "log", + "once_cell", "prost", "prost-build", "rand 0.8.5", "sha2 0.10.6", "snow", "static_assertions", - "x25519-dalek", + "thiserror", + "x25519-dalek 1.1.1", "zeroize", ] [[package]] name = "libp2p-plaintext" -version = "0.36.0" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "328e8c654a55ac7f093eb96dfd0386244dd337f2bd2822dc019522b743ea8add" +checksum = "4c43ab37fb4102682ae9a248dc2e6a8e7b941ec75cf24aed103060a788e0fd15" dependencies = [ "asynchronous-codec", "bytes", "futures", - "libp2p-core", + "libp2p-core 0.38.0", "log", "prost", "prost-build", @@ -3499,31 +4090,54 @@ dependencies = [ "void", ] +[[package]] +name = "libp2p-quic" +version = "0.7.0-alpha" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01e7c867e95c8130667b24409d236d37598270e6da69b3baf54213ba31ffca59" +dependencies = [ + "bytes", + "futures", + "futures-timer", + "if-watch", + "libp2p-core 0.38.0", + "libp2p-tls", + "log", + "parking_lot 0.12.1", + "quinn-proto", + "rand 0.8.5", + "rustls 0.20.7", + "thiserror", + "tokio", +] + [[package]] name = "libp2p-swarm" -version = "0.39.0" +version = "0.41.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ad2db60c06603606b54b58e4247e32efec87a93cb4387be24bf32926c600f2" +checksum = "b2a35472fe3276b3855c00f1c032ea8413615e030256429ad5349cdf67c6e1a0" dependencies = [ "either", "fnv", "futures", "futures-timer", "instant", - "libp2p-core", + "libp2p-core 0.38.0", + "libp2p-swarm-derive", "log", "pin-project", - "rand 0.7.3", + "rand 0.8.5", "smallvec", 
"thiserror", + "tokio", "void", ] [[package]] name = "libp2p-swarm-derive" -version = "0.30.1" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0eddc4497a8b5a506013c40e8189864f9c3a00db2b25671f428ae9007f3ba32" +checksum = "9d527d5827582abd44a6d80c07ff8b50b4ee238a8979e05998474179e79dc400" dependencies = [ "heck", "quote", @@ -3532,48 +4146,97 @@ dependencies = [ [[package]] name = "libp2p-tcp" -version = "0.36.0" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9675432b4c94b3960f3d2c7e57427b81aea92aab67fd0eebef09e2ae0ff54895" +checksum = "b4b257baf6df8f2df39678b86c578961d48cc8b68642a12f0f763f56c8e5858d" dependencies = [ "futures", "futures-timer", - "if-addrs 0.7.0", - "ipnet", + "if-watch", "libc", - "libp2p-core", + "libp2p-core 0.38.0", "log", "socket2", "tokio", ] [[package]] -name = "libp2p-websocket" -version = "0.38.0" +name = "libp2p-tls" +version = "0.1.0-alpha" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de8a9e825cc03f2fc194d2e1622113d7fe18e1c7f4458a582b83140c9b9aea27" +checksum = "f7905ce0d040576634e8a3229a7587cc8beab83f79db6023800f1792895defa8" dependencies = [ - "either", "futures", "futures-rustls", - "libp2p-core", - "log", - "parking_lot 0.12.1", - "quicksink", - "rw-stream-sink", - "soketto", - "url", - "webpki-roots", -] + "libp2p-core 0.38.0", + "rcgen 0.10.0", + "ring", + "rustls 0.20.7", + "thiserror", + "webpki 0.22.0", + "x509-parser 0.14.0", + "yasna", +] [[package]] -name = "libp2p-yamux" +name = "libp2p-webrtc" +version = "0.4.0-alpha" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdb6cd86dd68cba72308ea05de1cebf3ba0ae6e187c40548167955d4e3970f6a" +dependencies = [ + "async-trait", + "asynchronous-codec", + "bytes", + "futures", + "futures-timer", + "hex", + "if-watch", + "libp2p-core 0.38.0", + "libp2p-noise", + "log", + "multihash", + "prost", + "prost-build", + "prost-codec", + 
"rand 0.8.5", + "rcgen 0.9.3", + "serde", + "stun", + "thiserror", + "tinytemplate", + "tokio", + "tokio-util 0.7.4", + "webrtc", +] + +[[package]] +name = "libp2p-websocket" version = "0.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b74ec8dc042b583f0b2b93d52917f3b374c1e4b1cfa79ee74c7672c41257694c" +checksum = "1d705506030d5c0aaf2882437c70dab437605f21c5f9811978f694e6917a3b54" +dependencies = [ + "either", + "futures", + "futures-rustls", + "libp2p-core 0.38.0", + "log", + "parking_lot 0.12.1", + "quicksink", + "rw-stream-sink", + "soketto", + "url", + "webpki-roots", +] + +[[package]] +name = "libp2p-yamux" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f63594a0aa818642d9d4915c791945053877253f08a3626f13416b5cd928a29" dependencies = [ "futures", - "libp2p-core", + "libp2p-core 0.38.0", + "log", "parking_lot 0.12.1", "thiserror", "yamux", @@ -3662,7 +4325,7 @@ dependencies = [ "clap_utils", "database_manager", "directory", - "env_logger 0.9.1", + "env_logger 0.9.3", "environment", "eth1", "eth2_hashing", @@ -3717,7 +4380,7 @@ dependencies = [ "libp2p", "lighthouse_metrics", "lighthouse_version", - "lru", + "lru 0.7.8", "parking_lot 0.12.1", "prometheus-client", "quickcheck", @@ -3838,6 +4501,15 @@ dependencies = [ "hashbrown 0.12.3", ] +[[package]] +name = "lru" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6e8aaa3f231bb4bd57b84b2d5dc3ae7f350265df8aa96492e0bc394a1571909" +dependencies = [ + "hashbrown 0.12.3", +] + [[package]] name = "lru-cache" version = "0.1.2" @@ -3906,6 +4578,15 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73cbba799671b762df5a175adf59ce145165747bb891505c43d09aefbbf38beb" +[[package]] +name = "md-5" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"6365506850d44bff6e2fbcb5176cf63650e48bd45ef2fe2665ae1570e0f4b9ca" +dependencies = [ + "digest 0.10.6", +] + [[package]] name = "mdbx-sys" version = "0.11.6-4" @@ -3932,6 +4613,15 @@ dependencies = [ "autocfg 1.1.0", ] +[[package]] +name = "memoffset" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4" +dependencies = [ + "autocfg 1.1.0", +] + [[package]] name = "merkle_proof" version = "0.2.0" @@ -3996,9 +4686,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.5.4" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96590ba8f175222643a85693f33d26e9c8a015f599c216509b1a6894af675d34" +checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa" dependencies = [ "adler", ] @@ -4059,6 +4749,35 @@ dependencies = [ "url", ] +[[package]] +name = "multiaddr" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4aebdb21e90f81d13ed01dc84123320838e53963c2ca94b60b305d3fa64f31e" +dependencies = [ + "arrayref", + "byteorder", + "data-encoding", + "multibase", + "multihash", + "percent-encoding", + "serde", + "static_assertions", + "unsigned-varint 0.7.1", + "url", +] + +[[package]] +name = "multibase" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b3539ec3c1f04ac9748a260728e855f261b4977f5c3406612c884564f329404" +dependencies = [ + "base-x", + "data-encoding", + "data-encoding-macro", +] + [[package]] name = "multihash" version = "0.16.3" @@ -4066,7 +4785,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1c346cf9999c631f002d8f977c4eaeaa0e6386f16007202308d0b3757522c2cc" dependencies = [ "core2", - "digest 0.10.5", + "digest 0.10.6", "multihash-derive", "sha2 0.10.6", "unsigned-varint 0.7.1", @@ -4074,11 
+4793,11 @@ dependencies = [ [[package]] name = "multihash-derive" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc076939022111618a5026d3be019fd8b366e76314538ff9a1b59ffbcbf98bcd" +checksum = "1d6d4752e6230d8ef7adf7bd5d8c4b1f6561c1014c5ba9a37445ccefe18aa1db" dependencies = [ - "proc-macro-crate", + "proc-macro-crate 1.1.3", "proc-macro-error", "proc-macro2", "quote", @@ -4124,11 +4843,25 @@ dependencies = [ "unsigned-varint 0.7.1", ] +[[package]] +name = "multistream-select" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8552ab875c1313b97b8d20cb857b9fd63e2d1d6a0a1b53ce9821e575405f27a" +dependencies = [ + "bytes", + "futures", + "log", + "pin-project", + "smallvec", + "unsigned-varint 0.7.1", +] + [[package]] name = "native-tls" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd7e2f3618557f980e0b17e8856252eee3c97fa12c54dff0ca290fb6266ca4a9" +checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" dependencies = [ "lazy_static", "libc", @@ -4142,6 +4875,72 @@ dependencies = [ "tempfile", ] +[[package]] +name = "netlink-packet-core" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "345b8ab5bd4e71a2986663e88c56856699d060e78e152e6e9d7966fcd5491297" +dependencies = [ + "anyhow", + "byteorder", + "libc", + "netlink-packet-utils", +] + +[[package]] +name = "netlink-packet-route" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9ea4302b9759a7a88242299225ea3688e63c85ea136371bb6cf94fd674efaab" +dependencies = [ + "anyhow", + "bitflags", + "byteorder", + "libc", + "netlink-packet-core", + "netlink-packet-utils", +] + +[[package]] +name = "netlink-packet-utils" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"25af9cf0dc55498b7bd94a1508af7a78706aa0ab715a73c5169273e03c84845e" +dependencies = [ + "anyhow", + "byteorder", + "paste", + "thiserror", +] + +[[package]] +name = "netlink-proto" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65b4b14489ab424703c092062176d52ba55485a89c076b4f9db05092b7223aa6" +dependencies = [ + "bytes", + "futures", + "log", + "netlink-packet-core", + "netlink-sys", + "thiserror", + "tokio", +] + +[[package]] +name = "netlink-sys" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92b654097027250401127914afb37cb1f311df6610a9891ff07a757e94199027" +dependencies = [ + "bytes", + "futures", + "libc", + "log", + "tokio", +] + [[package]] name = "network" version = "0.2.0" @@ -4188,27 +4987,39 @@ dependencies = [ [[package]] name = "nix" -version = "0.23.1" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f866317acbd3a240710c63f065ffb1e4fd466259045ccb504130b7f668f35c6" +checksum = "8f3790c00a0150112de0f4cd161e3d7fc4b2d8a5542ffc35f099a2562aecb35c" dependencies = [ "bitflags", "cc", "cfg-if", "libc", - "memoffset", + "memoffset 0.6.5", ] [[package]] name = "nix" -version = "0.25.0" +version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e322c04a9e3440c327fca7b6c8a63e6890a32fa2ad689db972425f07e0d22abb" +checksum = "fa52e972a9a719cecb6864fb88568781eb706bac2cd1d4f04a648542dbf78069" dependencies = [ - "autocfg 1.1.0", "bitflags", "cfg-if", "libc", + "memoffset 0.6.5", +] + +[[package]] +name = "nix" +version = "0.26.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46a58d1d356c6597d08cde02c2f09d785b09e28711837b1ed667dc652c08a694" +dependencies = [ + "bitflags", + "cfg-if", + "libc", + "static_assertions", ] [[package]] @@ -4287,7 +5098,7 @@ dependencies = [ "autocfg 0.1.8", "byteorder", "lazy_static", - "libm", + "libm 0.2.6", "num-integer", 
"num-iter", "num-traits", @@ -4329,9 +5140,9 @@ dependencies = [ [[package]] name = "num_cpus" -version = "1.13.1" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1" +checksum = "f6058e64324c71e02bc2b150e4f3bc8286db6c83092132ffa3f6b1eab0f9def5" dependencies = [ "hermit-abi", "libc", @@ -4348,13 +5159,31 @@ dependencies = [ [[package]] name = "object" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21158b2c33aa6d4561f1c0a6ea283ca92bc54802a93b263e910746d679a7eb53" +checksum = "239da7f290cfa979f43f85a8efeee9a8a76d0827c356d37f9d3d7254d6b537fb" dependencies = [ "memchr", ] +[[package]] +name = "oid-registry" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38e20717fa0541f39bd146692035c37bedfa532b3e5071b35761082407546b2a" +dependencies = [ + "asn1-rs 0.3.1", +] + +[[package]] +name = "oid-registry" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9bedf36ffb6ba96c2eb7144ef6270557b52e54b20c0a8e1eb2ff99a6c6959bff" +dependencies = [ + "asn1-rs 0.5.1", +] + [[package]] name = "once_cell" version = "1.16.0" @@ -4382,9 +5211,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.42" +version = "0.10.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12fc0523e3bd51a692c8850d075d74dc062ccf251c0110668cbd921917118a13" +checksum = "29d971fd5722fec23977260f6e81aa67d2f22cadbdc2aa049f1022d9a3be1566" dependencies = [ "bitflags", "cfg-if", @@ -4414,18 +5243,18 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "111.22.0+1.1.1q" +version = "111.24.0+1.1.1s" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8f31f0d509d1c1ae9cada2f9539ff8f37933831fd5098879e482aa687d659853" +checksum = "3498f259dab01178c6228c6b00dcef0ed2a2d5e20d648c017861227773ea4abd" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.77" +version = "0.9.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b03b84c3b2d099b81f0953422b4d4ad58761589d0229b5506356afca05a3670a" +checksum = "5454462c0eced1e97f2ec09036abc8da362e66802f66fd20f86854d9d8cbcbc4" dependencies = [ "autocfg 1.1.0", "cc", @@ -4475,6 +5304,27 @@ dependencies = [ "sha2 0.10.6", ] +[[package]] +name = "p384" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfc8c5bf642dde52bb9e87c0ecd8ca5a76faac2eeed98dedb7c717997e1080aa" +dependencies = [ + "ecdsa", + "elliptic-curve", + "sha2 0.10.6", +] + +[[package]] +name = "packed_simd_2" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1914cd452d8fccd6f9db48147b29fd4ae05bea9dc5d9ad578509f72415de282" +dependencies = [ + "cfg-if", + "libm 0.1.4", +] + [[package]] name = "parity-scale-codec" version = "2.3.1" @@ -4509,7 +5359,7 @@ version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1557010476e0595c9b568d16dcfb81b93cdeb157612726f5170d31aa707bed27" dependencies = [ - "proc-macro-crate", + "proc-macro-crate 1.1.3", "proc-macro2", "quote", "syn", @@ -4521,12 +5371,18 @@ version = "3.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9299338969a3d2f491d65f140b00ddec470858402f888af98e8642fb5e8965cd" dependencies = [ - "proc-macro-crate", + "proc-macro-crate 1.1.3", "proc-macro2", "quote", "syn", ] +[[package]] +name = "parking" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72" + [[package]] name = "parking_lot" version = "0.11.2" @@ -4535,7 +5391,7 @@ checksum = 
"7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" dependencies = [ "instant", "lock_api", - "parking_lot_core 0.8.5", + "parking_lot_core 0.8.6", ] [[package]] @@ -4545,14 +5401,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", - "parking_lot_core 0.9.4", + "parking_lot_core 0.9.5", ] [[package]] name = "parking_lot_core" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216" +checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" dependencies = [ "cfg-if", "instant", @@ -4564,9 +5420,9 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.4" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dc9e0dc2adc1c69d09143aff38d3d30c5c3f0df0dad82e6d25547af174ebec0" +checksum = "7ff9f3fef3968a3ec5945535ed654cb38ff72d7495a25619e2247fb15a2ed9ba" dependencies = [ "cfg-if", "libc", @@ -4577,9 +5433,9 @@ dependencies = [ [[package]] name = "paste" -version = "1.0.9" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1de2e551fb905ac83f73f7aedf2f0cb4a0da7e35efa24a202a936269f1f18e1" +checksum = "cf1c2c742266c2f1041c914ba65355a83ae8747b05f208319784083583494b4b" [[package]] name = "pbkdf2" @@ -4614,6 +5470,15 @@ dependencies = [ "base64", ] +[[package]] +name = "pem-rfc7468" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d159833a9105500e0398934e205e0773f0b27529557134ecfc51c27646adac" +dependencies = [ + "base64ct", +] + [[package]] name = "percent-encoding" version = "2.2.0" @@ -4622,9 +5487,9 @@ checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" [[package]] name = "pest" -version = "2.4.0" +version = 
"2.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbc7bc69c062e492337d74d59b120c274fd3d261b6bf6d3207d499b4b379c41a" +checksum = "cc8bed3549e0f9b0a2a78bf7c0018237a2cdf085eecbbc048e52612438e4e9d0" dependencies = [ "thiserror", "ucd-trie", @@ -4710,6 +5575,12 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8d0eef3571242013a0d5dc84861c3ae4a652e56e12adf8bdc26ff5f8cb34c94" +[[package]] +name = "platforms" +version = "3.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3d7ddaed09e0eb771a79ab0fd64609ba0afb0a8366421957936ad14cbd13630" + [[package]] name = "plotters" version = "0.3.4" @@ -4738,6 +5609,20 @@ dependencies = [ "plotters-backend", ] +[[package]] +name = "polling" +version = "2.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22122d5ec4f9fe1b3916419b76be1e80bcb93f618d071d2edf841b137b2a2bd6" +dependencies = [ + "autocfg 1.1.0", + "cfg-if", + "libc", + "log", + "wepoll-ffi", + "windows-sys 0.42.0", +] + [[package]] name = "poly1305" version = "0.7.2" @@ -4749,6 +5634,17 @@ dependencies = [ "universal-hash", ] +[[package]] +name = "polyval" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eebcc4aa140b9abd2bc40d9c3f7ccec842679cd79045ac3a7ac698c1a064b7cd" +dependencies = [ + "cpuid-bool", + "opaque-debug", + "universal-hash", +] + [[package]] name = "polyval" version = "0.5.3" @@ -4763,9 +5659,19 @@ dependencies = [ [[package]] name = "ppv-lite86" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" + +[[package]] +name = "prettyplease" +version = "0.1.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"c142c0e46b57171fe0c528bee8c5b7569e80f0c17e377cd0e30ea57dbc11bb51" +dependencies = [ + "proc-macro2", + "syn", +] [[package]] name = "primitive-types" @@ -4795,18 +5701,26 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "1.2.1" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eda0fc3b0fb7c975631757e14d9049da17374063edb6ebbcbc54d880d4fe94e9" +checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" dependencies = [ - "once_cell", - "thiserror", "toml", ] [[package]] -name = "proc-macro-error" -version = "1.0.4" +name = "proc-macro-crate" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e17d47ce914bf4de440332250b0edd23ce48c005f59fab39d3335866b114f11a" +dependencies = [ + "thiserror", + "toml", +] + +[[package]] +name = "proc-macro-error" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ @@ -4895,9 +5809,9 @@ dependencies = [ [[package]] name = "prost" -version = "0.11.0" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "399c3c31cdec40583bb68f0b18403400d01ec4289c383aa047560439952c4dd7" +checksum = "c0b18e655c21ff5ac2084a5ad0611e827b3f92badf79f4910b5a5c58f4d87ff0" dependencies = [ "bytes", "prost-derive", @@ -4905,9 +5819,9 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.11.1" +version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f835c582e6bd972ba8347313300219fed5bfa52caf175298d860b61ff6069bb" +checksum = "276470f7f281b0ed53d2ae42dd52b4a8d08853a3c70e7fe95882acbb98a6ae94" dependencies = [ "bytes", "heck", @@ -4916,18 +5830,20 @@ dependencies = [ "log", "multimap", "petgraph", + "prettyplease", "prost", "prost-types", "regex", + "syn", "tempfile", "which", ] [[package]] name = "prost-codec" 
-version = "0.2.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "011ae9ff8359df7915f97302d591cdd9e0e27fbd5a4ddc5bd13b71079bb20987" +checksum = "0dc34979ff898b6e141106178981ce2596c387ea6e62533facfc61a37fc879c0" dependencies = [ "asynchronous-codec", "bytes", @@ -4938,9 +5854,9 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.11.0" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7345d5f0e08c0536d7ac7229952590239e77abf0a0100a1b1d890add6ea96364" +checksum = "164ae68b6587001ca506d3bf7f1000bfa248d0e1217b618108fba4ec1d0cc306" dependencies = [ "anyhow", "itertools", @@ -4951,9 +5867,9 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.11.1" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dfaa718ad76a44b3415e6c4d53b17c8f99160dcb3a99b10470fce8ad43f6e3e" +checksum = "747761bc3dc48f9a34553bf65605cf6cb6288ba219f3450b4275dbd81539551a" dependencies = [ "bytes", "prost", @@ -4989,14 +5905,34 @@ dependencies = [ "derive_more", "glob", "mach", - "nix 0.23.1", + "nix 0.23.2", "num_cpus", "once_cell", - "platforms", + "platforms 2.0.0", "thiserror", "unescape", ] +[[package]] +name = "ptr_meta" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0738ccf7ea06b608c10564b31debd4f5bc5e197fc8bfe088f68ae5ce81e7a4f1" +dependencies = [ + "ptr_meta_derive", +] + +[[package]] +name = "ptr_meta_derive" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "quick-error" version = "1.2.3" @@ -5037,6 +5973,24 @@ dependencies = [ "pin-project-lite 0.1.12", ] +[[package]] +name = "quinn-proto" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "72ef4ced82a24bb281af338b9e8f94429b6eca01b4e66d899f40031f074e74c9" +dependencies = [ + "bytes", + "rand 0.8.5", + "ring", + "rustc-hash", + "rustls 0.20.7", + "slab", + "thiserror", + "tinyvec", + "tracing", + "webpki 0.22.0", +] + [[package]] name = "quote" version = "1.0.21" @@ -5161,21 +6115,19 @@ dependencies = [ [[package]] name = "rayon" -version = "1.5.3" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd99e5772ead8baa5215278c9b15bf92087709e9c1b2d1f97cdb5a183c933a7d" +checksum = "6db3a213adf02b3bcfd2d3846bb41cb22857d131789e01df434fb7e7bc0759b7" dependencies = [ - "autocfg 1.1.0", - "crossbeam-deque", "either", "rayon-core", ] [[package]] name = "rayon-core" -version = "1.9.3" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "258bcdb5ac6dad48491bb2992db6b7cf74878b0384908af124823d118c99683f" +checksum = "cac410af5d00ab6884528b4ab69d1e8e146e8d471201800fa1b4524126de6ad3" dependencies = [ "crossbeam-channel", "crossbeam-deque", @@ -5183,6 +6135,31 @@ dependencies = [ "num_cpus", ] +[[package]] +name = "rcgen" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6413f3de1edee53342e6138e75b56d32e7bc6e332b3bd62d497b1929d4cfbcdd" +dependencies = [ + "pem", + "ring", + "time 0.3.17", + "x509-parser 0.13.2", + "yasna", +] + +[[package]] +name = "rcgen" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffbe84efe2f38dea12e9bfc1f65377fdf03e53a18cb3b995faedf7934c7e785b" +dependencies = [ + "pem", + "ring", + "time 0.3.17", + "yasna", +] + [[package]] name = "redox_syscall" version = "0.2.16" @@ -5205,9 +6182,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b" +checksum = 
"e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a" dependencies = [ "aho-corasick", "memchr", @@ -5225,9 +6202,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.27" +version = "0.6.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244" +checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" [[package]] name = "remove_dir_all" @@ -5238,11 +6215,20 @@ dependencies = [ "winapi", ] +[[package]] +name = "rend" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79af64b4b6362ffba04eef3a4e10829718a4896dac19daa741851c86781edf95" +dependencies = [ + "bytecheck", +] + [[package]] name = "reqwest" -version = "0.11.12" +version = "0.11.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "431949c384f4e2ae07605ccaa56d1d9d2ecdb5cadd4f9577ccfab29f2e5149fc" +checksum = "68cc60575865c7831548863cc02356512e3f1dc2f3f82cb837d7fc4cc8f3c97c" dependencies = [ "base64", "bytes", @@ -5278,7 +6264,7 @@ dependencies = [ "wasm-bindgen-futures", "web-sys", "webpki-roots", - "winreg 0.10.1", + "winreg", ] [[package]] @@ -5293,9 +6279,9 @@ dependencies = [ [[package]] name = "rfc6979" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88c86280f057430a52f4861551b092a01b419b8eacefc7c995eacb9dc132fe32" +checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb" dependencies = [ "crypto-bigint", "hmac 0.12.1", @@ -5317,6 +6303,31 @@ dependencies = [ "winapi", ] +[[package]] +name = "rkyv" +version = "0.7.39" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cec2b3485b07d96ddfd3134767b8a447b45ea4eb91448d0a35180ec0ffd5ed15" +dependencies = [ + "bytecheck", + "hashbrown 0.12.3", + "ptr_meta", + "rend", + "rkyv_derive", + "seahash", +] + +[[package]] +name = 
"rkyv_derive" +version = "0.7.39" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6eaedadc88b53e36dd32d940ed21ae4d850d5916f2581526921f553a72ac34c4" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "rle-decode-fast" version = "1.0.3" @@ -5354,6 +6365,46 @@ dependencies = [ "winapi", ] +[[package]] +name = "rtcp" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c11171e7e37998dcf54a9e9d4a6e2e1932c994424c7d39bc6349fed1424c45c3" +dependencies = [ + "bytes", + "thiserror", + "webrtc-util", +] + +[[package]] +name = "rtnetlink" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "322c53fd76a18698f1c27381d58091de3a043d356aa5bd0d510608b565f469a0" +dependencies = [ + "futures", + "log", + "netlink-packet-route", + "netlink-proto", + "nix 0.24.3", + "thiserror", + "tokio", +] + +[[package]] +name = "rtp" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2a095411ff00eed7b12e4c6a118ba984d113e1079582570d56a5ee723f11f80" +dependencies = [ + "async-trait", + "bytes", + "rand 0.8.5", + "serde", + "thiserror", + "webrtc-util", +] + [[package]] name = "rusqlite" version = "0.25.4" @@ -5371,13 +6422,20 @@ dependencies = [ [[package]] name = "rust_decimal" -version = "1.26.1" +version = "1.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee9164faf726e4f3ece4978b25ca877ddc6802fa77f38cdccb32c7f805ecd70c" +checksum = "33c321ee4e17d2b7abe12b5d20c1231db708dd36185c8a21e9de5fed6da4dbe9" dependencies = [ "arrayvec", + "borsh", + "bytecheck", + "byteorder", + "bytes", "num-traits", + "rand 0.8.5", + "rkyv", "serde", + "serde_json", ] [[package]] @@ -5425,6 +6483,15 @@ dependencies = [ "semver 1.0.14", ] +[[package]] +name = "rusticata-macros" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"faf0c4a6ece9950b9abdb62b1cfcf2a68b3b67a10ba445b3bb85be2a293d0632" +dependencies = [ + "nom 7.1.1", +] + [[package]] name = "rustls" version = "0.19.1" @@ -5498,7 +6565,7 @@ version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ecbd2eb639fd7cab5804a0837fe373cc2172d15437e804c054a9fb885cb923b0" dependencies = [ - "cipher", + "cipher 0.3.0", ] [[package]] @@ -5579,6 +6646,24 @@ dependencies = [ "untrusted", ] +[[package]] +name = "sdp" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d22a5ef407871893fd72b4562ee15e4742269b173959db4b8df6f538c414e13" +dependencies = [ + "rand 0.8.5", + "substring", + "thiserror", + "url", +] + +[[package]] +name = "seahash" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b" + [[package]] name = "sec1" version = "0.3.0" @@ -5689,9 +6774,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.147" +version = "1.0.150" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d193d69bae983fc11a79df82342761dfbf28a99fc8d203dca4c3c1b590948965" +checksum = "e326c9ec8042f1b5da33252c8a37e9ffbd2c9bef0155215b6e6c80c790e05f91" dependencies = [ "serde_derive", ] @@ -5718,9 +6803,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.147" +version = "1.0.150" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f1d362ca8fc9c3e3a7484440752472d68a6caa98f1ab81d99b5dfe517cec852" +checksum = "42a3df25b0713732468deadad63ab9da1f1fd75a48a15024b50363f128db627e" dependencies = [ "proc-macro2", "quote", @@ -5729,9 +6814,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.87" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ce777b7b150d76b9cf60d28b55f5847135a003f7d7350c6be7a773508ce7d45" +checksum = 
"020ff22c755c2ed3f8cf162dbb41a7268d934702f3ed3631656ea597e08fc3db" dependencies = [ "itoa 1.0.4", "ryu", @@ -5777,7 +6862,7 @@ version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ - "darling", + "darling 0.13.4", "proc-macro2", "quote", "syn", @@ -5810,13 +6895,13 @@ dependencies = [ [[package]] name = "sha-1" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f" +checksum = "f5058ada175748e33390e40e872bd0fe59a19f265d0158daa551c5a88a76009c" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.5", + "digest 0.10.6", ] [[package]] @@ -5827,7 +6912,7 @@ checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.5", + "digest 0.10.6", ] [[package]] @@ -5851,7 +6936,7 @@ checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.5", + "digest 0.10.6", ] [[package]] @@ -5872,7 +6957,7 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bdf0c33fae925bdc080598b84bc15c55e7b9a4a43b3c704da051f977469691c9" dependencies = [ - "digest 0.10.5", + "digest 0.10.6", "keccak", ] @@ -5906,7 +6991,7 @@ version = "1.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" dependencies = [ - "digest 0.10.5", + "digest 0.10.6", "rand_core 0.6.4", ] @@ -5919,7 +7004,7 @@ dependencies = [ "num-bigint", "num-traits", "thiserror", - "time 0.3.16", + "time 0.3.17", ] [[package]] @@ -5927,7 +7012,7 @@ name = "simulator" version = "0.2.0" dependencies = [ "clap", - "env_logger 0.9.1", + "env_logger 0.9.3", "eth1", "eth1_test_rig", "execution_layer", @@ 
-5965,7 +7050,7 @@ dependencies = [ "lmdb-rkv", "lmdb-rkv-sys", "logging", - "lru", + "lru 0.7.8", "maplit", "parking_lot 0.12.1", "rand 0.8.5", @@ -6045,7 +7130,7 @@ dependencies = [ "serde", "serde_json", "slog", - "time 0.3.16", + "time 0.3.17", ] [[package]] @@ -6090,7 +7175,7 @@ dependencies = [ "slog", "term", "thread_local", - "time 0.3.16", + "time 0.3.17", ] [[package]] @@ -6135,9 +7220,9 @@ checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" [[package]] name = "snap" -version = "1.0.5" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45456094d1983e2ee2a18fdfebce3189fa451699d0502cb8e3b49dba5ba41451" +checksum = "5e9f0ab6ef7eb7353d9119c170a436d1bf248eea575ac42d19d12f4e34130831" [[package]] name = "snow" @@ -6145,10 +7230,10 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "774d05a3edae07ce6d68ea6984f3c05e9bba8927e3dd591e3b479e5b03213d0d" dependencies = [ - "aes-gcm", + "aes-gcm 0.9.4", "blake2", "chacha20poly1305", - "curve25519-dalek 4.0.0-pre.1", + "curve25519-dalek 4.0.0-pre.5", "rand_core 0.6.4", "ring", "rustc_version 0.4.0", @@ -6231,7 +7316,7 @@ dependencies = [ "beacon_chain", "bls", "derivative", - "env_logger 0.9.1", + "env_logger 0.9.3", "eth2_hashing", "eth2_ssz", "eth2_ssz_derive", @@ -6281,7 +7366,7 @@ dependencies = [ "lazy_static", "leveldb", "lighthouse_metrics", - "lru", + "lru 0.7.8", "parking_lot 0.12.1", "serde", "serde_derive", @@ -6327,6 +7412,34 @@ dependencies = [ "syn", ] +[[package]] +name = "stun" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7e94b1ec00bad60e6410e058b52f1c66de3dc5fe4d62d09b3e52bb7d3b73e25" +dependencies = [ + "base64", + "crc", + "lazy_static", + "md-5", + "rand 0.8.5", + "ring", + "subtle", + "thiserror", + "tokio", + "url", + "webrtc-util", +] + +[[package]] +name = "substring" +version = "1.4.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "42ee6433ecef213b2e72f587ef64a2f5943e7cd16fbd82dbe8bc07486c534c86" +dependencies = [ + "autocfg 1.1.0", +] + [[package]] name = "subtle" version = "2.4.1" @@ -6339,7 +7452,7 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95a99807a055ff4ff5d249bb84c80d9eabb55ca3c452187daae43fd5b51ef695" dependencies = [ - "darling", + "darling 0.13.4", "itertools", "proc-macro2", "quote", @@ -6358,9 +7471,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.103" +version = "1.0.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a864042229133ada95abf3b54fdc62ef5ccabe9515b64717bcb9a1919e59445d" +checksum = "60b9b43d45702de4c839cb9b51d9f529c5dd26a4aff255b42b1ebc03e88ee908" dependencies = [ "proc-macro2", "quote", @@ -6387,9 +7500,9 @@ dependencies = [ [[package]] name = "sysinfo" -version = "0.26.7" +version = "0.26.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c375d5fd899e32847b8566e10598d6e9f1d9b55ec6de3cdf9e7da4bdc51371bc" +checksum = "29ddf41e393a9133c81d5f0974195366bd57082deac6e0eb02ed39b8341c2bb6" dependencies = [ "cfg-if", "core-foundation-sys", @@ -6400,6 +7513,27 @@ dependencies = [ "winapi", ] +[[package]] +name = "system-configuration" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d75182f12f490e953596550b65ee31bda7c8e043d9386174b353bda50838c3fd" +dependencies = [ + "bitflags", + "core-foundation", + "system-configuration-sys", +] + +[[package]] +name = "system-configuration-sys" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "system_health" version = "0.1.0" @@ -6550,9 +7684,9 @@ dependencies = [ [[package]] name = "time" -version = "0.1.44" 
+version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" +checksum = "1b797afad3f312d1c66a56d11d0316f916356d11bd158fbc6ca6389ff6bf805a" dependencies = [ "libc", "wasi 0.10.0+wasi-snapshot-preview1", @@ -6561,9 +7695,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fab5c8b9980850e06d92ddbe3ab839c062c801f3927c0fb8abd6fc8e918fbca" +checksum = "a561bf4617eebd33bca6434b988f39ed798e527f51a1e797d0ee4f61c0a38376" dependencies = [ "itoa 1.0.4", "libc", @@ -6581,9 +7715,9 @@ checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd" [[package]] name = "time-macros" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65bb801831d812c562ae7d2bfb531f26e66e4e1f6b17307ba4149c5064710e5b" +checksum = "d967f99f534ca7e495c575c62638eebc2898a8c84c119b89e250477bc4ba16b2" dependencies = [ "time-core", ] @@ -6654,9 +7788,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.21.2" +version = "1.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9e03c497dc955702ba729190dc4aac6f2a0ce97f913e5b1b5912fc5039d9099" +checksum = "eab6d665857cc6ca78d6e80303a02cea7a7851e85dfbd77cbdc09bd129f1ef46" dependencies = [ "autocfg 1.1.0", "bytes", @@ -6669,7 +7803,7 @@ dependencies = [ "signal-hook-registry", "socket2", "tokio-macros", - "winapi", + "windows-sys 0.42.0", ] [[package]] @@ -6684,9 +7818,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "1.8.0" +version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9724f9a975fb987ef7a3cd9be0350edcbe130698af5b8f7a631e23d42d052484" +checksum = 
"d266c00fde287f55d3f1c3e96c500c362a2b8c695076ec180f27918820bc6df8" dependencies = [ "proc-macro2", "quote", @@ -6790,6 +7924,7 @@ checksum = "0bb2e075f03b3d66d8d8785356224ba688d2906a371015e225beeb65ca92c740" dependencies = [ "bytes", "futures-core", + "futures-io", "futures-sink", "pin-project-lite 0.2.9", "tokio", @@ -6798,9 +7933,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.5.9" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d82e1a7758622a465f8cee077614c73484dac5b836c02ff6a40d5d1010324d7" +checksum = "1333c76748e868a4d9d1017b5ab53171dfd095f70c712fdb4653a406547f598f" dependencies = [ "serde", ] @@ -6823,9 +7958,9 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c530c8675c1dbf98facee631536fa116b5fb6382d7dd6dc1b118d970eafe3ba" +checksum = "f873044bf02dd1e8239e9c1293ea39dad76dc594ec16185d0a1bf31d8dc8d858" dependencies = [ "bitflags", "bytes", @@ -6963,16 +8098,16 @@ dependencies = [ name = "tree_hash_derive" version = "0.4.0" dependencies = [ - "darling", + "darling 0.13.4", "quote", "syn", ] [[package]] name = "trust-dns-proto" -version = "0.21.2" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c31f240f59877c3d4bb3b3ea0ec5a6a0cff07323580ff8c7a605cd7d08b255d" +checksum = "4f7f83d1e4a0e4358ac54c5c3681e5d7da5efc5a7a632c90bb6d6669ddd9bc26" dependencies = [ "async-trait", "cfg-if", @@ -6984,32 +8119,33 @@ dependencies = [ "idna 0.2.3", "ipnet", "lazy_static", - "log", "rand 0.8.5", "smallvec", + "socket2", "thiserror", "tinyvec", "tokio", + "tracing", "url", ] [[package]] name = "trust-dns-resolver" -version = "0.21.2" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4ba72c2ea84515690c9fcef4c6c660bb9df3036ed1051686de84605b74fd558" +checksum = 
"aff21aa4dcefb0a1afbfac26deb0adc93888c7d295fb63ab273ef276ba2b7cfe" dependencies = [ "cfg-if", "futures-util", "ipconfig", "lazy_static", - "log", "lru-cache", "parking_lot 0.12.1", "resolv-conf", "smallvec", "thiserror", "tokio", + "tracing", "trust-dns-proto", ] @@ -7052,13 +8188,32 @@ dependencies = [ "log", "rand 0.8.5", "rustls 0.20.7", - "sha-1 0.10.0", + "sha-1 0.10.1", "thiserror", "url", "utf-8", "webpki 0.22.0", ] +[[package]] +name = "turn" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4712ee30d123ec7ae26d1e1b218395a16c87cdbaf4b3925d170d684af62ea5e8" +dependencies = [ + "async-trait", + "base64", + "futures", + "log", + "md-5", + "rand 0.8.5", + "ring", + "stun", + "thiserror", + "tokio", + "webrtc-util", +] + [[package]] name = "twoway" version = "0.1.8" @@ -7070,9 +8225,9 @@ dependencies = [ [[package]] name = "typenum" -version = "1.15.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" +checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" [[package]] name = "types" @@ -7132,9 +8287,9 @@ checksum = "9e79c4d996edb816c91e4308506774452e55e95c3c9de07b6729e17e15a5ef81" [[package]] name = "uint" -version = "0.9.4" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a45526d29728d135c2900b0d30573fe3ee79fceb12ef534c7bb30e810a91b601" +checksum = "76f64bba2c53b04fcab63c01a7d7427eadc821e3bc48c34dc9ba29c501164b52" dependencies = [ "arbitrary", "byteorder", @@ -7258,6 +8413,15 @@ dependencies = [ "serde", ] +[[package]] +name = "uuid" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "422ee0de9031b5b948b97a8fc04e3aa35230001a722ddd27943e0be31564ce4c" +dependencies = [ + "getrandom 0.2.8", +] + [[package]] name = "validator_client" version = "0.3.5" @@ -7360,6 +8524,21 @@ version = "1.0.2" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" +[[package]] +name = "waitgroup" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1f50000a783467e6c0200f9d10642f4bc424e39efc1b770203e88b488f79292" +dependencies = [ + "atomic-waker", +] + +[[package]] +name = "waker-fn" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" + [[package]] name = "walkdir" version = "2.3.2" @@ -7656,13 +8835,234 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.22.5" +version = "0.22.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "368bfe657969fb01238bb756d351dcade285e0f6fcbd36dcb23359a5169975be" +checksum = "b6c71e40d7d2c34a5106301fb632274ca37242cd0c9d3e64dbece371a40a2d87" dependencies = [ "webpki 0.22.0", ] +[[package]] +name = "webrtc" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d3bc9049bdb2cea52f5fd4f6f728184225bdb867ed0dc2410eab6df5bdd67bb" +dependencies = [ + "arc-swap", + "async-trait", + "bytes", + "hex", + "interceptor", + "lazy_static", + "log", + "rand 0.8.5", + "rcgen 0.9.3", + "regex", + "ring", + "rtcp", + "rtp", + "rustls 0.19.1", + "sdp", + "serde", + "serde_json", + "sha2 0.10.6", + "stun", + "thiserror", + "time 0.3.17", + "tokio", + "turn", + "url", + "waitgroup", + "webrtc-data", + "webrtc-dtls", + "webrtc-ice", + "webrtc-mdns", + "webrtc-media", + "webrtc-sctp", + "webrtc-srtp", + "webrtc-util", +] + +[[package]] +name = "webrtc-data" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ef36a4d12baa6e842582fe9ec16a57184ba35e1a09308307b67d43ec8883100" +dependencies = [ + "bytes", + "derive_builder", + "log", + "thiserror", + "tokio", + "webrtc-sctp", + "webrtc-util", +] 
+ +[[package]] +name = "webrtc-dtls" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7021987ae0a2ed6c8cd33f68e98e49bb6e74ffe9543310267b48a1bbe3900e5f" +dependencies = [ + "aes 0.6.0", + "aes-gcm 0.8.0", + "async-trait", + "bincode", + "block-modes", + "byteorder", + "ccm", + "curve25519-dalek 3.2.0", + "der-parser 8.1.0", + "elliptic-curve", + "hkdf", + "hmac 0.10.1", + "log", + "oid-registry 0.6.1", + "p256", + "p384", + "rand 0.8.5", + "rand_core 0.6.4", + "rcgen 0.9.3", + "ring", + "rustls 0.19.1", + "sec1", + "serde", + "sha-1 0.9.8", + "sha2 0.9.9", + "signature", + "subtle", + "thiserror", + "tokio", + "webpki 0.21.4", + "webrtc-util", + "x25519-dalek 2.0.0-pre.1", + "x509-parser 0.13.2", +] + +[[package]] +name = "webrtc-ice" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "494483fbb2f5492620871fdc78b084aed8807377f6e3fe88b2e49f0a9c9c41d7" +dependencies = [ + "arc-swap", + "async-trait", + "crc", + "log", + "rand 0.8.5", + "serde", + "serde_json", + "stun", + "thiserror", + "tokio", + "turn", + "url", + "uuid 1.2.2", + "waitgroup", + "webrtc-mdns", + "webrtc-util", +] + +[[package]] +name = "webrtc-mdns" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2548ae970afc2ae8e0b0dbfdacd9602d426d4f0ff6cda4602a45c0fd7ceaa82a" +dependencies = [ + "log", + "socket2", + "thiserror", + "tokio", + "webrtc-util", +] + +[[package]] +name = "webrtc-media" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee2a3c157a040324e5049bcbd644ffc9079e6738fa2cfab2bcff64e5cc4c00d7" +dependencies = [ + "byteorder", + "bytes", + "derive_builder", + "displaydoc", + "rand 0.8.5", + "rtp", + "thiserror", + "webrtc-util", +] + +[[package]] +name = "webrtc-sctp" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0d47adcd9427eb3ede33d5a7f3424038f63c965491beafcc20bc650a2f6679c0" +dependencies = [ + "arc-swap", + "async-trait", + "bytes", + "crc", + "log", + "rand 0.8.5", + "thiserror", + "tokio", + "webrtc-util", +] + +[[package]] +name = "webrtc-srtp" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6183edc4c1c6c0175f8812eefdce84dfa0aea9c3ece71c2bf6ddd3c964de3da5" +dependencies = [ + "aead 0.4.3", + "aes 0.7.5", + "aes-gcm 0.9.4", + "async-trait", + "byteorder", + "bytes", + "ctr 0.8.0", + "hmac 0.11.0", + "log", + "rtcp", + "rtp", + "sha-1 0.9.8", + "subtle", + "thiserror", + "tokio", + "webrtc-util", +] + +[[package]] +name = "webrtc-util" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93f1db1727772c05cf7a2cfece52c3aca8045ca1e176cd517d323489aa3c6d87" +dependencies = [ + "async-trait", + "bitflags", + "bytes", + "cc", + "ipnet", + "lazy_static", + "libc", + "log", + "nix 0.24.3", + "rand 0.8.5", + "thiserror", + "tokio", + "winapi", +] + +[[package]] +name = "wepoll-ffi" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d743fdedc5c64377b5fc2bc036b01c7fd642205a0d96356034ae3404d49eb7fb" +dependencies = [ + "cc", +] + [[package]] name = "which" version = "4.3.0" @@ -7717,6 +9117,19 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "windows" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45296b64204227616fdbf2614cefa4c236b98ee64dfaaaa435207ed99fe7829f" +dependencies = [ + "windows_aarch64_msvc 0.34.0", + "windows_i686_gnu 0.34.0", + "windows_i686_msvc 0.34.0", + "windows_x86_64_gnu 0.34.0", + "windows_x86_64_msvc 0.34.0", +] + [[package]] name = "windows-acl" version = "0.3.0" @@ -7763,6 +9176,12 @@ version = "0.42.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "41d2aa71f6f0cbe00ae5167d90ef3cfe66527d6f613ca78ac8024c3ccab9a19e" +[[package]] +name = "windows_aarch64_msvc" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17cffbe740121affb56fad0fc0e421804adf0ae00891205213b5cecd30db881d" + [[package]] name = "windows_aarch64_msvc" version = "0.36.1" @@ -7775,6 +9194,12 @@ version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd0f252f5a35cac83d6311b2e795981f5ee6e67eb1f9a7f64eb4500fbc4dcdb4" +[[package]] +name = "windows_i686_gnu" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2564fde759adb79129d9b4f54be42b32c89970c18ebf93124ca8870a498688ed" + [[package]] name = "windows_i686_gnu" version = "0.36.1" @@ -7787,6 +9212,12 @@ version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fbeae19f6716841636c28d695375df17562ca208b2b7d0dc47635a50ae6c5de7" +[[package]] +name = "windows_i686_msvc" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cd9d32ba70453522332c14d38814bceeb747d80b3958676007acadd7e166956" + [[package]] name = "windows_i686_msvc" version = "0.36.1" @@ -7799,6 +9230,12 @@ version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "84c12f65daa39dd2babe6e442988fc329d6243fdce47d7d2d155b8d874862246" +[[package]] +name = "windows_x86_64_gnu" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfce6deae227ee8d356d19effc141a509cc503dfd1f850622ec4b0f84428e1f4" + [[package]] name = "windows_x86_64_gnu" version = "0.36.1" @@ -7817,6 +9254,12 @@ version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09d525d2ba30eeb3297665bd434a54297e4170c7f1a44cad4ef58095b4cd2028" +[[package]] +name = "windows_x86_64_msvc" +version = 
"0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d19538ccc21819d01deaf88d6a17eae6596a12e9aafdbb97916fb49896d89de9" + [[package]] name = "windows_x86_64_msvc" version = "0.36.1" @@ -7829,15 +9272,6 @@ version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f40009d85759725a34da6d89a94e63d7bdc50a862acf0dbc7c8e488f1edcb6f5" -[[package]] -name = "winreg" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0120db82e8a1e0b9fb3345a539c478767c0048d842860994d96113d5b667bd69" -dependencies = [ - "winapi", -] - [[package]] name = "winreg" version = "0.10.1" @@ -7873,9 +9307,9 @@ checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" [[package]] name = "wyz" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30b31594f29d27036c383b53b59ed3476874d518f0efb151b27a4c275141390e" +checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" dependencies = [ "tap", ] @@ -7891,6 +9325,54 @@ dependencies = [ "zeroize", ] +[[package]] +name = "x25519-dalek" +version = "2.0.0-pre.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5da623d8af10a62342bcbbb230e33e58a63255a58012f8653c578e54bab48df" +dependencies = [ + "curve25519-dalek 3.2.0", + "rand_core 0.6.4", + "zeroize", +] + +[[package]] +name = "x509-parser" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fb9bace5b5589ffead1afb76e43e34cff39cd0f3ce7e170ae0c29e53b88eb1c" +dependencies = [ + "asn1-rs 0.3.1", + "base64", + "data-encoding", + "der-parser 7.0.0", + "lazy_static", + "nom 7.1.1", + "oid-registry 0.4.0", + "ring", + "rusticata-macros", + "thiserror", + "time 0.3.17", +] + +[[package]] +name = "x509-parser" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e0ecbeb7b67ce215e40e3cc7f2ff902f94a223acf44995934763467e7b1febc8" +dependencies = [ + "asn1-rs 0.5.1", + "base64", + "data-encoding", + "der-parser 8.1.0", + "lazy_static", + "nom 7.1.1", + "oid-registry 0.6.1", + "rusticata-macros", + "thiserror", + "time 0.3.17", +] + [[package]] name = "xml-rs" version = "0.8.4" @@ -7929,6 +9411,15 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "yasna" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aed2e7a52e3744ab4d0c05c20aa065258e84c49fd4226f5191b2ed29712710b4" +dependencies = [ + "time 0.3.17", +] + [[package]] name = "zeroize" version = "1.5.7" @@ -7940,9 +9431,9 @@ dependencies = [ [[package]] name = "zeroize_derive" -version = "1.3.2" +version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f8f187641dad4f680d25c4bfc4225b418165984179f26ca76ec4fb6441d3a17" +checksum = "44bf07cb3e50ea2003396695d58bf46bc9887a1f362260446fad6bc4e79bd36c" dependencies = [ "proc-macro2", "quote", @@ -7961,5 +9452,5 @@ dependencies = [ "crc32fast", "flate2", "thiserror", - "time 0.1.44", + "time 0.1.45", ] diff --git a/beacon_node/http_api/tests/common.rs b/beacon_node/http_api/tests/common.rs index 9d6ad4050bf..7c228d9803f 100644 --- a/beacon_node/http_api/tests/common.rs +++ b/beacon_node/http_api/tests/common.rs @@ -7,7 +7,13 @@ use eth2::{BeaconNodeHttpClient, Timeouts}; use http_api::{Config, Context}; use lighthouse_network::{ discv5::enr::{CombinedKey, EnrBuilder}, - libp2p::{core::connection::ConnectionId, swarm::NetworkBehaviour}, + libp2p::{ + core::connection::ConnectionId, + swarm::{ + behaviour::{ConnectionEstablished, FromSwarm}, + NetworkBehaviour, + }, + }, rpc::methods::{MetaData, MetaDataV2}, types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield, SyncState}, ConnectedPoint, Enr, NetworkGlobals, PeerId, PeerManager, @@ -143,12 +149,18 @@ pub async fn create_api_server_on_port( // add a peer let peer_id = 
PeerId::random(); - let connected_point = ConnectedPoint::Listener { + let endpoint = &ConnectedPoint::Listener { local_addr: EXTERNAL_ADDR.parse().unwrap(), send_back_addr: EXTERNAL_ADDR.parse().unwrap(), }; - let con_id = ConnectionId::new(1); - pm.inject_connection_established(&peer_id, &con_id, &connected_point, None, 0); + let connection_id = ConnectionId::new(1); + pm.on_swarm_event(FromSwarm::ConnectionEstablished(ConnectionEstablished { + peer_id, + connection_id, + endpoint, + failed_addresses: &[], + other_established: 0, + })); *network_globals.sync_state.write() = SyncState::Synced; let eth1_service = diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index 977f0a10887..474ebebb507 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -40,11 +40,12 @@ superstruct = "0.5.0" prometheus-client = "0.18.0" unused_port = { path = "../../common/unused_port" } delay_map = "0.1.1" +void = "1" [dependencies.libp2p] -version = "0.48.0" +version = "0.50.0" default-features = false -features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns-tokio", "tcp-tokio", "plaintext", "secp256k1"] +features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns", "tcp", "tokio", "plaintext", "secp256k1", "macros", "ecdsa"] [dev-dependencies] slog-term = "2.6.0" diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 8e528f09d2c..c41844c2c59 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -22,12 +22,13 @@ use enr::{ATTESTATION_BITFIELD_ENR_KEY, ETH2_ENR_KEY, SYNC_COMMITTEE_BITFIELD_EN use futures::prelude::*; use futures::stream::FuturesUnordered; use libp2p::multiaddr::Protocol; +use libp2p::swarm::behaviour::{DialFailure, FromSwarm}; use libp2p::swarm::AddressScore; pub use libp2p::{ 
core::{connection::ConnectionId, ConnectedPoint, Multiaddr, PeerId}, swarm::{ - handler::ConnectionHandler, DialError, NetworkBehaviour, - NetworkBehaviourAction as NBAction, NotifyHandler, PollParameters, SubstreamProtocol, + dummy::ConnectionHandler, DialError, NetworkBehaviour, NetworkBehaviourAction as NBAction, + NotifyHandler, PollParameters, SubstreamProtocol, }, }; use lru::LruCache; @@ -927,11 +928,11 @@ impl Discovery { impl NetworkBehaviour for Discovery { // Discovery is not a real NetworkBehaviour... - type ConnectionHandler = libp2p::swarm::handler::DummyConnectionHandler; + type ConnectionHandler = ConnectionHandler; type OutEvent = DiscoveredPeers; fn new_handler(&mut self) -> Self::ConnectionHandler { - libp2p::swarm::handler::DummyConnectionHandler::default() + ConnectionHandler } // Handles the libp2p request to obtain multiaddrs for peer_id's in order to dial them. @@ -947,40 +948,6 @@ impl NetworkBehaviour for Discovery { } } - fn inject_event( - &mut self, - _: PeerId, - _: ConnectionId, - _: ::OutEvent, - ) { - } - - fn inject_dial_failure( - &mut self, - peer_id: Option, - _handler: Self::ConnectionHandler, - error: &DialError, - ) { - if let Some(peer_id) = peer_id { - match error { - DialError::Banned - | DialError::LocalPeerId - | DialError::InvalidPeerId(_) - | DialError::ConnectionIo(_) - | DialError::NoAddresses - | DialError::Transport(_) - | DialError::WrongPeerId { .. } => { - // set peer as disconnected in discovery DHT - debug!(self.log, "Marking peer disconnected in DHT"; "peer_id" => %peer_id); - self.disconnect_peer(&peer_id); - } - DialError::ConnectionLimit(_) - | DialError::DialPeerConditionFalse(_) - | DialError::Aborted => {} - } - } - } - // Main execution loop to drive the behaviour fn poll( &mut self, @@ -1067,6 +1034,50 @@ impl NetworkBehaviour for Discovery { } Poll::Pending } + + fn on_swarm_event(&mut self, event: FromSwarm) { + match event { + FromSwarm::DialFailure(DialFailure { peer_id, error, .. 
}) => { + self.on_dial_failure(peer_id, error) + } + FromSwarm::ConnectionEstablished(_) + | FromSwarm::ConnectionClosed(_) + | FromSwarm::AddressChange(_) + | FromSwarm::ListenFailure(_) + | FromSwarm::NewListener(_) + | FromSwarm::NewListenAddr(_) + | FromSwarm::ExpiredListenAddr(_) + | FromSwarm::ListenerError(_) + | FromSwarm::ListenerClosed(_) + | FromSwarm::NewExternalAddr(_) + | FromSwarm::ExpiredExternalAddr(_) => { + // Ignore events not relevant to discovery + } + } + } +} + +impl Discovery { + fn on_dial_failure(&mut self, peer_id: Option, error: &DialError) { + if let Some(peer_id) = peer_id { + match error { + DialError::Banned + | DialError::LocalPeerId + | DialError::InvalidPeerId(_) + | DialError::ConnectionIo(_) + | DialError::NoAddresses + | DialError::Transport(_) + | DialError::WrongPeerId { .. } => { + // set peer as disconnected in discovery DHT + debug!(self.log, "Marking peer disconnected in DHT"; "peer_id" => %peer_id); + self.disconnect_peer(&peer_id); + } + DialError::ConnectionLimit(_) + | DialError::DialPeerConditionFalse(_) + | DialError::Aborted => {} + } + } + } } #[cfg(test)] diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index a468239a9e4..89670a2eb3c 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -7,7 +7,7 @@ use crate::{NetworkGlobals, PeerId}; use crate::{Subnet, SubnetDiscovery}; use delay_map::HashSetDelay; use discv5::Enr; -use libp2p::identify::IdentifyInfo; +use libp2p::identify::Info as IdentifyInfo; use peerdb::{client::ClientKind, BanOperation, BanResult, ScoreUpdateResult}; use rand::seq::SliceRandom; use slog::{debug, error, trace, warn}; diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index 175dfaf0188..42eb270c40e 100644 --- 
a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -1,14 +1,12 @@ use std::task::{Context, Poll}; use futures::StreamExt; -use libp2p::core::connection::ConnectionId; use libp2p::core::ConnectedPoint; +use libp2p::swarm::behaviour::{ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm}; use libp2p::swarm::dial_opts::{DialOpts, PeerCondition}; -use libp2p::swarm::handler::DummyConnectionHandler; -use libp2p::swarm::{ - ConnectionHandler, DialError, NetworkBehaviour, NetworkBehaviourAction, PollParameters, -}; -use libp2p::{Multiaddr, PeerId}; +use libp2p::swarm::dummy::ConnectionHandler; +use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}; +use libp2p::PeerId; use slog::{debug, error}; use types::EthSpec; @@ -20,23 +18,14 @@ use super::peerdb::BanResult; use super::{ConnectingType, PeerManager, PeerManagerEvent, ReportSource}; impl NetworkBehaviour for PeerManager { - type ConnectionHandler = DummyConnectionHandler; + type ConnectionHandler = ConnectionHandler; type OutEvent = PeerManagerEvent; /* Required trait members */ fn new_handler(&mut self) -> Self::ConnectionHandler { - DummyConnectionHandler::default() - } - - fn inject_event( - &mut self, - _: PeerId, - _: ConnectionId, - _: ::OutEvent, - ) { - unreachable!("Dummy handler does not emit events") + ConnectionHandler } fn poll( @@ -114,19 +103,46 @@ impl NetworkBehaviour for PeerManager { Poll::Pending } - /* Overwritten trait members */ + fn on_swarm_event(&mut self, event: FromSwarm) { + match event { + FromSwarm::ConnectionEstablished(ConnectionEstablished { + peer_id, + endpoint, + other_established, + .. + }) => self.on_connection_established(peer_id, endpoint, other_established), + FromSwarm::ConnectionClosed(ConnectionClosed { + peer_id, + remaining_established, + .. 
+ }) => self.on_connection_closed(peer_id, remaining_established), + FromSwarm::DialFailure(DialFailure { peer_id, .. }) => self.on_dial_failure(peer_id), + FromSwarm::AddressChange(_) + | FromSwarm::ListenFailure(_) + | FromSwarm::NewListener(_) + | FromSwarm::NewListenAddr(_) + | FromSwarm::ExpiredListenAddr(_) + | FromSwarm::ListenerError(_) + | FromSwarm::ListenerClosed(_) + | FromSwarm::NewExternalAddr(_) + | FromSwarm::ExpiredExternalAddr(_) => { + // The rest of the events we ignore since they are handled in their associated + // `SwarmEvent` + } + } + } +} - fn inject_connection_established( +impl PeerManager { + fn on_connection_established( &mut self, - peer_id: &PeerId, - _connection_id: &ConnectionId, + peer_id: PeerId, endpoint: &ConnectedPoint, - _failed_addresses: Option<&Vec>, other_established: usize, ) { debug!(self.log, "Connection established"; "peer_id" => %peer_id, "connection" => ?endpoint.to_endpoint()); if other_established == 0 { - self.events.push(PeerManagerEvent::MetaData(*peer_id)); + self.events.push(PeerManagerEvent::MetaData(peer_id)); } // Check NAT if metrics are enabled @@ -135,20 +151,20 @@ impl NetworkBehaviour for PeerManager { } // Check to make sure the peer is not supposed to be banned - match self.ban_status(peer_id) { + match self.ban_status(&peer_id) { // TODO: directly emit the ban event? BanResult::BadScore => { // This is a faulty state error!(self.log, "Connected to a banned peer. Re-banning"; "peer_id" => %peer_id); // Reban the peer - self.goodbye_peer(peer_id, GoodbyeReason::Banned, ReportSource::PeerManager); + self.goodbye_peer(&peer_id, GoodbyeReason::Banned, ReportSource::PeerManager); return; } BanResult::BannedIp(ip_addr) => { // A good peer has connected to us via a banned IP address. We ban the peer and // prevent future connections. debug!(self.log, "Peer connected via banned IP. 
Banning"; "peer_id" => %peer_id, "banned_ip" => %ip_addr); - self.goodbye_peer(peer_id, GoodbyeReason::BannedIP, ReportSource::PeerManager); + self.goodbye_peer(&peer_id, GoodbyeReason::BannedIP, ReportSource::PeerManager); return; } BanResult::NotBanned => {} @@ -162,11 +178,11 @@ impl NetworkBehaviour for PeerManager { .network_globals .peers .read() - .peer_info(peer_id) + .peer_info(&peer_id) .map_or(true, |peer| !peer.has_future_duty()) { // Gracefully disconnect the peer. - self.disconnect_peer(*peer_id, GoodbyeReason::TooManyPeers); + self.disconnect_peer(peer_id, GoodbyeReason::TooManyPeers); return; } @@ -174,14 +190,14 @@ impl NetworkBehaviour for PeerManager { // does not need to know about these peers. match endpoint { ConnectedPoint::Listener { send_back_addr, .. } => { - self.inject_connect_ingoing(peer_id, send_back_addr.clone(), None); + self.inject_connect_ingoing(&peer_id, send_back_addr.clone(), None); self.events - .push(PeerManagerEvent::PeerConnectedIncoming(*peer_id)); + .push(PeerManagerEvent::PeerConnectedIncoming(peer_id)); } ConnectedPoint::Dialer { address, .. 
} => { - self.inject_connect_outgoing(peer_id, address.clone(), None); + self.inject_connect_outgoing(&peer_id, address.clone(), None); self.events - .push(PeerManagerEvent::PeerConnectedOutgoing(*peer_id)); + .push(PeerManagerEvent::PeerConnectedOutgoing(peer_id)); } } @@ -189,14 +205,8 @@ impl NetworkBehaviour for PeerManager { self.update_connected_peer_metrics(); metrics::inc_counter(&metrics::PEER_CONNECT_EVENT_COUNT); } - fn inject_connection_closed( - &mut self, - peer_id: &PeerId, - _: &ConnectionId, - _: &ConnectedPoint, - _: DummyConnectionHandler, - remaining_established: usize, - ) { + + fn on_connection_closed(&mut self, peer_id: PeerId, remaining_established: usize) { if remaining_established > 0 { return; } @@ -206,62 +216,33 @@ impl NetworkBehaviour for PeerManager { .network_globals .peers .read() - .is_connected_or_disconnecting(peer_id) + .is_connected_or_disconnecting(&peer_id) { // We are disconnecting the peer or the peer has already been connected. // Both these cases, the peer has been previously registered by the peer manager and // potentially the application layer. // Inform the application. self.events - .push(PeerManagerEvent::PeerDisconnected(*peer_id)); + .push(PeerManagerEvent::PeerDisconnected(peer_id)); debug!(self.log, "Peer disconnected"; "peer_id" => %peer_id); } // NOTE: It may be the case that a rejected node, due to too many peers is disconnected // here and the peer manager has no knowledge of its connection. We insert it here for // reference so that peer manager can track this peer. 
- self.inject_disconnect(peer_id); + self.inject_disconnect(&peer_id); // Update the prometheus metrics self.update_connected_peer_metrics(); metrics::inc_counter(&metrics::PEER_DISCONNECT_EVENT_COUNT); } - fn inject_address_change( - &mut self, - _peer_id: &PeerId, - _connection_id: &ConnectionId, - old: &ConnectedPoint, - new: &ConnectedPoint, - ) { - debug_assert!( - matches!( - (old, new), - ( - // inbound remains inbound - ConnectedPoint::Listener { .. }, - ConnectedPoint::Listener { .. } - ) | ( - // outbound remains outbound - ConnectedPoint::Dialer { .. }, - ConnectedPoint::Dialer { .. } - ) - ), - "A peer has changed between inbound and outbound" - ) - } - /// A dial attempt has failed. /// /// NOTE: It can be the case that we are dialing a peer and during the dialing process the peer /// connects and the dial attempt later fails. To handle this, we only update the peer_db if /// the peer is not already connected. - fn inject_dial_failure( - &mut self, - peer_id: Option, - _handler: DummyConnectionHandler, - _error: &DialError, - ) { + fn on_dial_failure(&mut self, peer_id: Option) { if let Some(peer_id) = peer_id { if !self.network_globals.peers.read().is_connected(&peer_id) { self.inject_disconnect(&peer_id); diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/client.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/client.rs index dcc121b7f49..1178dbcb9ce 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/client.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/client.rs @@ -2,7 +2,7 @@ //! //! Currently using identify to fingerprint. 
-use libp2p::identify::IdentifyInfo; +use libp2p::identify::Info as IdentifyInfo; use serde::Serialize; use strum::{AsRefStr, EnumIter, IntoStaticStr}; diff --git a/beacon_node/lighthouse_network/src/service/behaviour.rs b/beacon_node/lighthouse_network/src/service/behaviour.rs index 3adc940a6a7..7d20b87ad1c 100644 --- a/beacon_node/lighthouse_network/src/service/behaviour.rs +++ b/beacon_node/lighthouse_network/src/service/behaviour.rs @@ -7,8 +7,8 @@ use libp2p::gossipsub::subscription_filter::{ MaxCountSubscriptionFilter, WhitelistSubscriptionFilter, }; use libp2p::gossipsub::Gossipsub as BaseGossipsub; -use libp2p::identify::Identify; -use libp2p::NetworkBehaviour; +use libp2p::identify::Behaviour as Identify; +use libp2p::swarm::NetworkBehaviour; use types::EthSpec; use super::api_types::RequestId; diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index a6f1ce20ade..5b3598216b5 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -26,7 +26,7 @@ use libp2p::gossipsub::subscription_filter::MaxCountSubscriptionFilter; use libp2p::gossipsub::{ GossipsubEvent, IdentTopic as Topic, MessageAcceptance, MessageAuthenticity, MessageId, }; -use libp2p::identify::{Identify, IdentifyConfig, IdentifyEvent}; +use libp2p::identify::{Behaviour as Identify, Config as IdentifyConfig, Event as IdentifyEvent}; use libp2p::multiaddr::{Multiaddr, Protocol as MProtocol}; use libp2p::swarm::{ConnectionLimits, Swarm, SwarmBuilder, SwarmEvent}; use libp2p::PeerId; @@ -316,7 +316,7 @@ impl Network { // use the executor for libp2p struct Executor(task_executor::TaskExecutor); - impl libp2p::core::Executor for Executor { + impl libp2p::swarm::Executor for Executor { fn exec(&self, f: Pin + Send>>) { self.0.spawn(f, "libp2p"); } @@ -341,12 +341,16 @@ impl Network { .with_max_established_per_peer(Some(MAX_CONNECTIONS_PER_PEER)); ( - 
SwarmBuilder::new(transport, behaviour, local_peer_id) - .notify_handler_buffer_size(std::num::NonZeroUsize::new(7).expect("Not zero")) - .connection_event_buffer_size(64) - .connection_limits(limits) - .executor(Box::new(Executor(executor))) - .build(), + SwarmBuilder::with_executor( + transport, + behaviour, + local_peer_id, + Executor(executor), + ) + .notify_handler_buffer_size(std::num::NonZeroUsize::new(7).expect("Not zero")) + .connection_event_buffer_size(64) + .connection_limits(limits) + .build(), bandwidth, ) }; diff --git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs index 3f02e73c0ea..addaaf5b5e9 100644 --- a/beacon_node/lighthouse_network/src/service/utils.rs +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -44,8 +44,7 @@ type BoxedTransport = Boxed<(PeerId, StreamMuxerBox)>; pub fn build_transport( local_private_key: Keypair, ) -> std::io::Result<(BoxedTransport, Arc)> { - let tcp = - libp2p::tcp::TokioTcpTransport::new(libp2p::tcp::GenTcpConfig::default().nodelay(true)); + let tcp = libp2p::tcp::tokio::Transport::new(libp2p::tcp::Config::default().nodelay(true)); let transport = libp2p::dns::TokioDnsConfig::system(tcp)?; #[cfg(feature = "libp2p-websocket")] let transport = { diff --git a/beacon_node/lighthouse_network/tests/common/mod.rs b/beacon_node/lighthouse_network/tests/common.rs similarity index 98% rename from beacon_node/lighthouse_network/tests/common/mod.rs rename to beacon_node/lighthouse_network/tests/common.rs index a3c32d0fb1b..b67b412cfc2 100644 --- a/beacon_node/lighthouse_network/tests/common/mod.rs +++ b/beacon_node/lighthouse_network/tests/common.rs @@ -15,13 +15,6 @@ use types::{ }; use unused_port::unused_tcp_port; -#[allow(clippy::type_complexity)] -#[allow(unused)] -pub mod behaviour; -#[allow(clippy::type_complexity)] -#[allow(unused)] -pub mod swarm; - type E = MinimalEthSpec; type ReqId = usize; diff --git 
a/beacon_node/lighthouse_network/tests/common/behaviour.rs b/beacon_node/lighthouse_network/tests/common/behaviour.rs deleted file mode 100644 index 50fe6941db2..00000000000 --- a/beacon_node/lighthouse_network/tests/common/behaviour.rs +++ /dev/null @@ -1,395 +0,0 @@ -// NOTE: Taken from libp2p's swarm's testing utils. -// -// Copyright 2020 Parity Technologies (UK) Ltd. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. 
- -use std::collections::HashMap; -use std::task::{Context, Poll}; - -use libp2p::core::connection::{ConnectedPoint, ConnectionId}; -use libp2p::core::transport::ListenerId; -use libp2p::swarm::handler::{ConnectionHandler, DummyConnectionHandler, IntoConnectionHandler}; -use libp2p::swarm::{DialError, NetworkBehaviour, NetworkBehaviourAction, PollParameters}; -use libp2p::{Multiaddr, PeerId}; - -/// A `MockBehaviour` is a `NetworkBehaviour` that allows for -/// the instrumentation of return values, without keeping -/// any further state. -pub struct MockBehaviour< - THandler = DummyConnectionHandler, - TOutEvent = ::OutEvent, -> where - THandler: ConnectionHandler, -{ - /// The prototype protocols handler that is cloned for every - /// invocation of `new_handler`. - pub handler_proto: THandler, - /// The addresses to return from `addresses_of_peer`. - pub addresses: HashMap>, - /// The next action to return from `poll`. - /// - /// An action is only returned once. - pub next_action: Option>, -} - -impl MockBehaviour -where - THandler: ConnectionHandler, -{ - pub fn new(handler_proto: THandler) -> Self { - MockBehaviour { - handler_proto, - addresses: HashMap::new(), - next_action: None, - } - } -} - -impl NetworkBehaviour for MockBehaviour -where - THandler: ConnectionHandler + Clone, - THandler::OutEvent: Clone, - TOutEvent: Send + 'static, -{ - type ConnectionHandler = THandler; - type OutEvent = TOutEvent; - - fn new_handler(&mut self) -> Self::ConnectionHandler { - self.handler_proto.clone() - } - - fn addresses_of_peer(&mut self, p: &PeerId) -> Vec { - self.addresses.get(p).map_or(Vec::new(), |v| v.clone()) - } - - fn inject_event(&mut self, _: PeerId, _: ConnectionId, _: THandler::OutEvent) {} - - fn poll( - &mut self, - _: &mut Context, - _: &mut impl PollParameters, - ) -> Poll> { - Option::take(&mut self.next_action).map_or(Poll::Pending, Poll::Ready) - } -} - -/// A `CallTraceBehaviour` is a `NetworkBehaviour` that tracks invocations of callback methods 
and -/// their arguments, wrapping around an inner behaviour. It ensures certain invariants are met. -pub struct CallTraceBehaviour -where - TInner: NetworkBehaviour, -{ - inner: TInner, - - pub addresses_of_peer: Vec, - pub inject_connection_established: Vec<(PeerId, ConnectionId, ConnectedPoint, usize)>, - pub inject_connection_closed: Vec<(PeerId, ConnectionId, ConnectedPoint, usize)>, - pub inject_event: Vec<( - PeerId, - ConnectionId, - <::Handler as ConnectionHandler>::OutEvent, - )>, - pub inject_dial_failure: Vec>, - pub inject_new_listener: Vec, - pub inject_new_listen_addr: Vec<(ListenerId, Multiaddr)>, - pub inject_new_external_addr: Vec, - pub inject_expired_listen_addr: Vec<(ListenerId, Multiaddr)>, - pub inject_expired_external_addr: Vec, - pub inject_listener_error: Vec, - pub inject_listener_closed: Vec<(ListenerId, bool)>, - pub poll: usize, -} - -impl CallTraceBehaviour -where - TInner: NetworkBehaviour, -{ - pub fn new(inner: TInner) -> Self { - Self { - inner, - addresses_of_peer: Vec::new(), - inject_connection_established: Vec::new(), - inject_connection_closed: Vec::new(), - inject_event: Vec::new(), - inject_dial_failure: Vec::new(), - inject_new_listener: Vec::new(), - inject_new_listen_addr: Vec::new(), - inject_new_external_addr: Vec::new(), - inject_expired_listen_addr: Vec::new(), - inject_expired_external_addr: Vec::new(), - inject_listener_error: Vec::new(), - inject_listener_closed: Vec::new(), - poll: 0, - } - } - - #[allow(dead_code)] - pub fn reset(&mut self) { - self.addresses_of_peer = Vec::new(); - self.inject_connection_established = Vec::new(); - self.inject_connection_closed = Vec::new(); - self.inject_event = Vec::new(); - self.inject_dial_failure = Vec::new(); - self.inject_new_listen_addr = Vec::new(); - self.inject_new_external_addr = Vec::new(); - self.inject_expired_listen_addr = Vec::new(); - self.inject_listener_error = Vec::new(); - self.inject_listener_closed = Vec::new(); - self.poll = 0; - } - - pub fn inner(&mut 
self) -> &mut TInner { - &mut self.inner - } - - /// Checks that when the expected number of closed connection notifications are received, a - /// given number of expected disconnections have been received as well. - /// - /// Returns if the first condition is met. - pub fn assert_disconnected( - &self, - expected_closed_connections: usize, - expected_disconnections: usize, - ) -> bool { - if self.inject_connection_closed.len() == expected_closed_connections { - assert_eq!( - self.inject_connection_closed - .iter() - .filter(|(.., remaining_established)| { *remaining_established == 0 }) - .count(), - expected_disconnections - ); - return true; - } - - false - } - - /// Checks that when the expected number of established connection notifications are received, - /// a given number of expected connections have been received as well. - /// - /// Returns if the first condition is met. - pub fn assert_connected( - &self, - expected_established_connections: usize, - expected_connections: usize, - ) -> bool { - if self.inject_connection_established.len() == expected_established_connections { - assert_eq!( - self.inject_connection_established - .iter() - .filter(|(.., reported_aditional_connections)| { - *reported_aditional_connections == 0 - }) - .count(), - expected_connections - ); - return true; - } - - false - } -} - -impl NetworkBehaviour for CallTraceBehaviour -where - TInner: NetworkBehaviour, - <::Handler as ConnectionHandler>::OutEvent: - Clone, -{ - type ConnectionHandler = TInner::ConnectionHandler; - type OutEvent = TInner::OutEvent; - - fn new_handler(&mut self) -> Self::ConnectionHandler { - self.inner.new_handler() - } - - fn addresses_of_peer(&mut self, p: &PeerId) -> Vec { - self.addresses_of_peer.push(*p); - self.inner.addresses_of_peer(p) - } - - fn inject_connection_established( - &mut self, - p: &PeerId, - c: &ConnectionId, - e: &ConnectedPoint, - errors: Option<&Vec>, - other_established: usize, - ) { - let mut other_peer_connections = self - 
.inject_connection_established - .iter() - .rev() // take last to first - .filter_map(|(peer, .., other_established)| { - if p == peer { - Some(other_established) - } else { - None - } - }) - .take(other_established); - - // We are informed that there are `other_established` additional connections. Ensure that the - // number of previous connections is consistent with this - if let Some(&prev) = other_peer_connections.next() { - if prev < other_established { - assert_eq!( - prev, - other_established - 1, - "Inconsistent connection reporting" - ) - } - assert_eq!(other_peer_connections.count(), other_established - 1); - } else { - assert_eq!(other_established, 0) - } - self.inject_connection_established - .push((*p, *c, e.clone(), other_established)); - self.inner - .inject_connection_established(p, c, e, errors, other_established); - } - - fn inject_connection_closed( - &mut self, - p: &PeerId, - c: &ConnectionId, - e: &ConnectedPoint, - handler: ::Handler, - remaining_established: usize, - ) { - let mut other_closed_connections = self - .inject_connection_established - .iter() - .rev() // take last to first - .filter_map(|(peer, .., remaining_established)| { - if p == peer { - Some(remaining_established) - } else { - None - } - }) - .take(remaining_established); - - // We are informed that there are `other_established` additional connections. 
Ensure that the - // number of previous connections is consistent with this - if let Some(&prev) = other_closed_connections.next() { - if prev < remaining_established { - assert_eq!( - prev, - remaining_established - 1, - "Inconsistent closed connection reporting" - ) - } - assert_eq!(other_closed_connections.count(), remaining_established - 1); - } else { - assert_eq!(remaining_established, 0) - } - assert!( - self.inject_connection_established - .iter() - .any(|(peer, conn_id, endpoint, _)| (peer, conn_id, endpoint) == (p, c, e)), - "`inject_connection_closed` is called only for connections for \ - which `inject_connection_established` was called first." - ); - self.inject_connection_closed - .push((*p, *c, e.clone(), remaining_established)); - self.inner - .inject_connection_closed(p, c, e, handler, remaining_established); - } - - fn inject_event( - &mut self, - p: PeerId, - c: ConnectionId, - e: <::Handler as ConnectionHandler>::OutEvent, - ) { - assert!( - self.inject_connection_established - .iter() - .any(|(peer_id, conn_id, ..)| *peer_id == p && c == *conn_id), - "`inject_event` is called for reported connections." - ); - assert!( - !self - .inject_connection_closed - .iter() - .any(|(peer_id, conn_id, ..)| *peer_id == p && c == *conn_id), - "`inject_event` is never called for closed connections." 
- ); - - self.inject_event.push((p, c, e.clone())); - self.inner.inject_event(p, c, e); - } - - fn inject_dial_failure( - &mut self, - p: Option, - handler: Self::ConnectionHandler, - error: &DialError, - ) { - self.inject_dial_failure.push(p); - self.inner.inject_dial_failure(p, handler, error); - } - - fn inject_new_listener(&mut self, id: ListenerId) { - self.inject_new_listener.push(id); - self.inner.inject_new_listener(id); - } - - fn inject_new_listen_addr(&mut self, id: ListenerId, a: &Multiaddr) { - self.inject_new_listen_addr.push((id, a.clone())); - self.inner.inject_new_listen_addr(id, a); - } - - fn inject_expired_listen_addr(&mut self, id: ListenerId, a: &Multiaddr) { - self.inject_expired_listen_addr.push((id, a.clone())); - self.inner.inject_expired_listen_addr(id, a); - } - - fn inject_new_external_addr(&mut self, a: &Multiaddr) { - self.inject_new_external_addr.push(a.clone()); - self.inner.inject_new_external_addr(a); - } - - fn inject_expired_external_addr(&mut self, a: &Multiaddr) { - self.inject_expired_external_addr.push(a.clone()); - self.inner.inject_expired_external_addr(a); - } - - fn inject_listener_error(&mut self, l: ListenerId, e: &(dyn std::error::Error + 'static)) { - self.inject_listener_error.push(l); - self.inner.inject_listener_error(l, e); - } - - fn inject_listener_closed(&mut self, l: ListenerId, r: Result<(), &std::io::Error>) { - self.inject_listener_closed.push((l, r.is_ok())); - self.inner.inject_listener_closed(l, r); - } - - fn poll( - &mut self, - cx: &mut Context, - args: &mut impl PollParameters, - ) -> Poll> { - self.poll += 1; - self.inner.poll(cx, args) - } -} diff --git a/beacon_node/lighthouse_network/tests/common/swarm.rs b/beacon_node/lighthouse_network/tests/common/swarm.rs deleted file mode 100644 index aa41a5c066d..00000000000 --- a/beacon_node/lighthouse_network/tests/common/swarm.rs +++ /dev/null @@ -1,99 +0,0 @@ -use std::collections::HashMap; -use std::pin::Pin; - -use 
super::behaviour::{CallTraceBehaviour, MockBehaviour}; - -use futures::stream::Stream; -use futures::task::{Context, Poll}; -use libp2p::swarm::handler::ConnectionHandler; -use libp2p::swarm::{IntoConnectionHandler, NetworkBehaviour, Swarm, SwarmBuilder, SwarmEvent}; -use libp2p::{PeerId, Transport}; - -use futures::StreamExt; - -pub fn new_test_swarm(behaviour: B) -> Swarm -where - B: NetworkBehaviour, -{ - let id_keys = libp2p::identity::Keypair::generate_ed25519(); - let local_public_key = id_keys.public(); - let transport = libp2p::core::transport::MemoryTransport::default() - .upgrade(libp2p::core::upgrade::Version::V1) - .authenticate(libp2p::plaintext::PlainText2Config { - local_public_key: local_public_key.clone(), - }) - .multiplex(libp2p::yamux::YamuxConfig::default()) - .boxed(); - SwarmBuilder::new(transport, behaviour, local_public_key.into()).build() -} - -pub fn random_multiaddr() -> libp2p::multiaddr::Multiaddr { - libp2p::multiaddr::Protocol::Memory(rand::random::()).into() -} - -/// Bind a memory multiaddr to a compatible swarm. 
-pub async fn bind_listener( - swarm: &mut Swarm, -) -> libp2p::multiaddr::Multiaddr { - swarm.listen_on(random_multiaddr()).unwrap(); - match swarm.select_next_some().await { - SwarmEvent::NewListenAddr { - listener_id: _, - address, - } => address, - _ => panic!("Testing swarm's first event should be a new listener"), - } -} - -#[derive(Default)] -pub struct SwarmPool { - swarms: HashMap>, -} - -impl SwarmPool { - pub fn with_capacity(capacity: usize) -> Self { - Self { - swarms: HashMap::with_capacity(capacity), - } - } - pub fn insert(&mut self, swarm: Swarm) -> PeerId { - let peer_id = *swarm.local_peer_id(); - self.swarms.insert(peer_id, swarm); - peer_id - } - - pub fn remove(&mut self, peer_id: &PeerId) { - self.swarms.remove(peer_id); - } - - pub fn get_mut(&mut self, peer_id: &PeerId) -> Option<&mut Swarm> { - self.swarms.get_mut(peer_id) - } - - pub fn swarms(&self) -> &HashMap> { - &self.swarms - } - - pub fn swarms_mut(&mut self) -> &mut HashMap> { - &mut self.swarms - } -} - -impl Stream for SwarmPool -where - B: NetworkBehaviour, - ::ConnectionHandler: ConnectionHandler, -{ - type Item = (PeerId, - SwarmEvent<::OutEvent, <<::ConnectionHandler as IntoConnectionHandler>::Handler as ConnectionHandler>::Error>); - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut polls = self - .get_mut() - .swarms - .iter_mut() - .map(|(&peer_id, swarm)| swarm.map(move |ev| (peer_id, ev))) - .collect::>(); - polls.poll_next_unpin(cx) - } -} diff --git a/beacon_node/lighthouse_network/tests/pm_tests.rs b/beacon_node/lighthouse_network/tests/pm_tests.rs deleted file mode 100644 index 17a044ced02..00000000000 --- a/beacon_node/lighthouse_network/tests/pm_tests.rs +++ /dev/null @@ -1,203 +0,0 @@ -#![cfg(not(debug_assertions))] - -mod common; -use std::{ - collections::{HashMap, HashSet}, - sync::Arc, -}; - -use common::{ - behaviour::{CallTraceBehaviour, MockBehaviour}, - swarm, -}; -use lighthouse_network::{ - peer_manager::{config::Config, 
PeerManagerEvent}, - NetworkGlobals, PeerAction, PeerInfo, PeerManager, ReportSource, -}; -use types::MinimalEthSpec as E; - -use futures::StreamExt; -use libp2p::{ - core::either::EitherError, - swarm::SwarmEvent, - swarm::{handler::DummyConnectionHandler, DummyBehaviour, KeepAlive, Swarm}, - NetworkBehaviour, -}; - -use slog::debug; - -/// Struct that mimics the lighthouse_network::Service with respect to handling peer manager -/// events. -// TODO: make this a real struct for more accurate testing. -struct Service { - swarm: Swarm, -} - -impl Service { - async fn select_next_some(&mut self) -> SwarmEvent> { - let ev = self.swarm.select_next_some().await; - match &ev { - SwarmEvent::Behaviour(Ev(PeerManagerEvent::Banned(peer_id, _addr_vec))) => { - self.swarm.ban_peer_id(*peer_id); - } - SwarmEvent::Behaviour(Ev(PeerManagerEvent::UnBanned(peer_id, _addr_vec))) => { - self.swarm.unban_peer_id(*peer_id); - } - SwarmEvent::Behaviour(Ev(PeerManagerEvent::DisconnectPeer(peer_id, _reason))) => { - // directly disconnect here. - let _ = self.swarm.disconnect_peer_id(*peer_id); - } - _ => {} - } - ev - } -} - -#[derive(Debug)] -struct Ev(PeerManagerEvent); -impl From for Ev { - fn from(_: void::Void) -> Self { - unreachable!("No events are emmited") - } -} -impl From for Ev { - fn from(ev: PeerManagerEvent) -> Self { - Ev(ev) - } -} - -#[derive(NetworkBehaviour)] -#[behaviour(out_event = "Ev")] -struct Behaviour { - pm_call_trace: CallTraceBehaviour>, - sibling: MockBehaviour, -} - -impl Behaviour { - fn new(pm: PeerManager) -> Self { - Behaviour { - pm_call_trace: CallTraceBehaviour::new(pm), - sibling: MockBehaviour::new(DummyConnectionHandler { - // The peer manager votes No, so we make sure the combined handler stays alive this - // way. 
- keep_alive: KeepAlive::Yes, - }), - } - } -} - -#[tokio::test] -async fn banned_peers_consistency() { - let log = common::build_log(slog::Level::Debug, false); - let pm_log = log.new(slog::o!("who" => "[PM]")); - let globals: Arc> = Arc::new(NetworkGlobals::new_test_globals(&log)); - - // Build the peer manager. - let (mut pm_service, pm_addr) = { - let pm_config = Config { - discovery_enabled: false, - ..Default::default() - }; - let pm = PeerManager::new(pm_config, globals.clone(), &pm_log).unwrap(); - let mut pm_swarm = swarm::new_test_swarm(Behaviour::new(pm)); - let pm_addr = swarm::bind_listener(&mut pm_swarm).await; - let service = Service { swarm: pm_swarm }; - (service, pm_addr) - }; - - let excess_banned_peers = 15; - let peers_to_ban = - lighthouse_network::peer_manager::peerdb::MAX_BANNED_PEERS + excess_banned_peers; - - // Build all the dummy peers needed. - let (mut swarm_pool, peers) = { - let mut pool = swarm::SwarmPool::with_capacity(peers_to_ban); - let mut peers = HashSet::with_capacity(peers_to_ban); - for _ in 0..peers_to_ban { - let mut peer_swarm = - swarm::new_test_swarm(DummyBehaviour::with_keep_alive(KeepAlive::Yes)); - let _peer_addr = swarm::bind_listener(&mut peer_swarm).await; - // It is ok to dial all at the same time since the swarm handles an event at a time. - peer_swarm.dial(pm_addr.clone()).unwrap(); - let peer_id = pool.insert(peer_swarm); - peers.insert(peer_id); - } - (pool, peers) - }; - - // we track banned peers at the swarm level here since there is no access to that info. - let mut swarm_banned_peers = HashMap::with_capacity(peers_to_ban); - let mut peers_unbanned = 0; - let timeout = tokio::time::sleep(tokio::time::Duration::from_secs(30)); - futures::pin_mut!(timeout); - - loop { - // poll the pm and dummy swarms. - tokio::select! 
{ - pm_event = pm_service.select_next_some() => { - debug!(log, "[PM] {:?}", pm_event); - match pm_event { - SwarmEvent::Behaviour(Ev(ev)) => match ev { - PeerManagerEvent::Banned(peer_id, _) => { - let has_been_unbanned = false; - swarm_banned_peers.insert(peer_id, has_been_unbanned); - } - PeerManagerEvent::UnBanned(peer_id, _) => { - *swarm_banned_peers.get_mut(&peer_id).expect("Unbanned peer must be banned first") = true; - peers_unbanned += 1; - } - _ => {} - } - SwarmEvent::ConnectionEstablished { - peer_id, - endpoint: _, - num_established: _, - concurrent_dial_errors: _, - } => { - assert!(peers.contains(&peer_id)); - // now we report the peer as banned. - pm_service - .swarm - .behaviour_mut() - .pm_call_trace - .inner() - .report_peer( - &peer_id, - PeerAction::Fatal, - ReportSource::Processor, - None, - "" - ); - }, - _ => {} - } - } - Some((_peer_id, _peer_ev)) = swarm_pool.next() => { - // we need to poll the swarms to keep the peers going - } - _ = timeout.as_mut() => { - panic!("Test timeout.") - } - } - - if peers_unbanned == excess_banned_peers { - let pdb = globals.peers.read(); - let inconsistencies = swarm_banned_peers - .into_iter() - .map(|(peer_id, was_unbanned)| { - was_unbanned - != pdb.peer_info(&peer_id).map_or( - false, /* We forgot about a banned peer */ - PeerInfo::is_banned, - ) - }); - assert_eq!( - inconsistencies - .filter(|is_consistent| *is_consistent) - .count(), - peers_to_ban - ); - return; - } - } -} diff --git a/bors.toml b/bors.toml index dbe92c68f45..096ac3b29a2 100644 --- a/bors.toml +++ b/bors.toml @@ -23,7 +23,6 @@ status = [ "check-msrv", "slasher-tests", "syncing-simulator-ubuntu", - "disallowed-from-async-lint", "compile-with-beta-compiler" ] use_squash_merge = true From 4bd2b777ec114bf3e7940aad237914e948df9aa9 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 9 Jan 2023 03:11:59 +0000 Subject: [PATCH 15/23] Verify execution block hashes during finalized sync (#3794) ## Issue Addressed Recent discussions with 
other client devs about optimistic sync have revealed a conceptual issue with the optimisation implemented in #3738. In designing that feature I failed to consider that the execution node checks the `blockHash` of the execution payload before responding with `SYNCING`, and that omitting this check entirely results in a degradation of the full node's validation. A node omitting the `blockHash` checks could be tricked by a supermajority of validators into following an invalid chain, something which is ordinarily impossible. ## Proposed Changes I've added verification of the `payload.block_hash` in Lighthouse. In case of failure we log a warning and fall back to verifying the payload with the execution client. I've used our existing dependency on `ethers_core` for RLP support, and a new dependency on Parity's `triehash` crate for the Merkle patricia trie. Although the `triehash` crate is currently unmaintained it seems like our best option at the moment (it is also used by Reth, and requires vastly less boilerplate than Parity's generic `trie-root` library). Block hash verification is pretty quick, about 500us per block on my machine (mainnet). The optimistic finalized sync feature can be disabled using `--disable-optimistic-finalized-sync` which forces full verification with the EL. ## Additional Info This PR also introduces a new dependency on our [`metastruct`](https://github.com/sigp/metastruct) library, which was perfectly suited to the RLP serialization method. There will likely be changes as `metastruct` grows, but I think this is a good way to start dogfooding it. I took inspiration from some Parity and Reth code while writing this, and have preserved the relevant license headers on the files containing code that was copied and modified. 
--- Cargo.lock | 403 ++++++++---------- beacon_node/beacon_chain/src/chain_config.rs | 3 + .../beacon_chain/src/execution_payload.rs | 58 ++- beacon_node/builder_client/Cargo.toml | 2 +- beacon_node/execution_layer/Cargo.toml | 6 +- beacon_node/execution_layer/src/block_hash.rs | 163 +++++++ beacon_node/execution_layer/src/keccak.rs | 35 ++ beacon_node/execution_layer/src/lib.rs | 7 + beacon_node/execution_layer/src/metrics.rs | 5 + beacon_node/network/Cargo.toml | 2 +- beacon_node/src/cli.rs | 7 + beacon_node/src/config.rs | 4 + common/clap_utils/Cargo.toml | 2 +- consensus/cached_tree_hash/Cargo.toml | 2 +- consensus/merkle_proof/Cargo.toml | 2 +- consensus/serde_utils/Cargo.toml | 2 +- consensus/ssz/Cargo.toml | 2 +- consensus/swap_or_not_shuffle/Cargo.toml | 2 +- consensus/tree_hash/Cargo.toml | 2 +- consensus/types/Cargo.toml | 3 +- consensus/types/src/execution_block_header.rs | 74 ++++ consensus/types/src/lib.rs | 3 + crypto/bls/Cargo.toml | 2 +- lighthouse/tests/beacon_node.rs | 19 + testing/ef_tests/Cargo.toml | 2 +- .../execution_engine_integration/Cargo.toml | 4 +- testing/node_test_rig/Cargo.toml | 2 +- 27 files changed, 561 insertions(+), 257 deletions(-) create mode 100644 beacon_node/execution_layer/src/block_hash.rs create mode 100644 beacon_node/execution_layer/src/keccak.rs create mode 100644 consensus/types/src/execution_block_header.rs diff --git a/Cargo.lock b/Cargo.lock index cc1ad52477e..200c1fc9bd9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -755,7 +755,7 @@ dependencies = [ "eth2_hashing", "eth2_serde_utils", "eth2_ssz", - "ethereum-types 0.12.1", + "ethereum-types 0.14.1", "hex", "milagro_bls", "rand 0.7.3", @@ -804,51 +804,6 @@ dependencies = [ "types", ] -[[package]] -name = "borsh" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15bf3650200d8bffa99015595e10f1fbd17de07abbc25bb067da79e769939bfa" -dependencies = [ - "borsh-derive", - "hashbrown 0.11.2", -] - -[[package]] -name = 
"borsh-derive" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6441c552f230375d18e3cc377677914d2ca2b0d36e52129fe15450a2dce46775" -dependencies = [ - "borsh-derive-internal", - "borsh-schema-derive-internal", - "proc-macro-crate 0.1.5", - "proc-macro2", - "syn", -] - -[[package]] -name = "borsh-derive-internal" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5449c28a7b352f2d1e592a8a28bf139bc71afb0764a14f3c02500935d8c44065" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "borsh-schema-derive-internal" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdbd5696d8bfa21d53d9fe39a714a18538bad11492a42d066dbbc395fb1951c0" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "bs58" version = "0.4.0" @@ -900,27 +855,6 @@ version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" -[[package]] -name = "bytecheck" -version = "0.6.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d11cac2c12b5adc6570dad2ee1b87eff4955dac476fe12d81e5fdd352e52406f" -dependencies = [ - "bytecheck_derive", - "ptr_meta", -] - -[[package]] -name = "bytecheck_derive" -version = "0.6.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13e576ebe98e605500b3c8041bb888e966653577172df6dd97398714eb30b9bf" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "byteorder" version = "1.4.3" @@ -965,7 +899,7 @@ dependencies = [ "eth2_ssz", "eth2_ssz_derive", "eth2_ssz_types", - "ethereum-types 0.12.1", + "ethereum-types 0.14.1", "quickcheck", "quickcheck_macros", "smallvec", @@ -1102,7 +1036,7 @@ dependencies = [ "dirs", "eth2_network_config", "eth2_ssz", - "ethereum-types 0.12.1", + "ethereum-types 0.14.1", "hex", "serde", 
"serde_json", @@ -1972,7 +1906,7 @@ dependencies = [ "derivative", "eth2_ssz", "eth2_ssz_derive", - "ethereum-types 0.12.1", + "ethereum-types 0.14.1", "execution_layer", "fork_choice", "fs2", @@ -2273,7 +2207,7 @@ dependencies = [ name = "eth2_serde_utils" version = "0.1.1" dependencies = [ - "ethereum-types 0.12.1", + "ethereum-types 0.14.1", "hex", "serde", "serde_derive", @@ -2285,7 +2219,7 @@ name = "eth2_ssz" version = "0.4.1" dependencies = [ "eth2_ssz_derive", - "ethereum-types 0.12.1", + "ethereum-types 0.14.1", "itertools", "smallvec", ] @@ -2359,11 +2293,11 @@ dependencies = [ [[package]] name = "ethabi" -version = "17.2.0" +version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4966fba78396ff92db3b817ee71143eccd98acf0f876b8d600e585a670c5d1b" +checksum = "7413c5f74cc903ea37386a8965a936cbeb334bd270862fdece542c1b2dcbc898" dependencies = [ - "ethereum-types 0.13.1", + "ethereum-types 0.14.1", "hex", "once_cell", "regex", @@ -2381,22 +2315,24 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfb684ac8fa8f6c5759f788862bb22ec6fe3cb392f6bfd08e3c64b603661e3f8" dependencies = [ "crunchy", - "fixed-hash", + "fixed-hash 0.7.0", "impl-rlp", - "impl-serde", + "impl-serde 0.3.2", "tiny-keccak", ] [[package]] name = "ethbloom" -version = "0.12.1" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11da94e443c60508eb62cf256243a64da87304c2802ac2528847f79d750007ef" +checksum = "c22d4b5885b6aa2fe5e8b9329fb8d232bf739e434e6b87347c63bdd00c120f60" dependencies = [ "crunchy", - "fixed-hash", + "fixed-hash 0.8.0", + "impl-codec 0.6.0", "impl-rlp", - "impl-serde", + "impl-serde 0.4.0", + "scale-info", "tiny-keccak", ] @@ -2429,46 +2365,47 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05136f7057fe789f06e6d41d07b34e6f70d8c86e5693b60f97aaa6553553bdaf" dependencies = [ "ethbloom 0.11.1", - "fixed-hash", + "fixed-hash 0.7.0", 
"impl-rlp", - "impl-serde", + "impl-serde 0.3.2", "primitive-types 0.10.1", "uint", ] [[package]] name = "ethereum-types" -version = "0.13.1" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2827b94c556145446fcce834ca86b7abf0c39a805883fe20e72c5bfdb5a0dc6" +checksum = "02d215cbf040552efcbe99a38372fe80ab9d00268e20012b79fcd0f073edd8ee" dependencies = [ - "ethbloom 0.12.1", - "fixed-hash", + "ethbloom 0.13.0", + "fixed-hash 0.8.0", + "impl-codec 0.6.0", "impl-rlp", - "impl-serde", - "primitive-types 0.11.1", + "impl-serde 0.4.0", + "primitive-types 0.12.1", + "scale-info", "uint", ] [[package]] name = "ethers-core" -version = "0.17.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ebdd63c828f58aa067f40f9adcbea5e114fb1f90144b3a1e2858e0c9b1ff4e8" +checksum = "ade3e9c97727343984e1ceada4fdab11142d2ee3472d2c67027d56b1251d4f15" dependencies = [ "arrayvec", "bytes", "chrono", "elliptic-curve", - "ethabi 17.2.0", - "fastrlp", + "ethabi 18.0.0", "generic-array", "hex", "k256", + "open-fastrlp", "rand 0.8.5", "rlp", "rlp-derive", - "rust_decimal", "serde", "serde_json", "strum", @@ -2479,9 +2416,9 @@ dependencies = [ [[package]] name = "ethers-providers" -version = "0.17.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e46482e4d1e79b20c338fd9db9e166184eb387f0a4e7c05c5b5c0aa2e8c8900c" +checksum = "a1a9e0597aa6b2fdc810ff58bc95e4eeaa2c219b3e615ed025106ecb027407d8" dependencies = [ "async-trait", "auto_impl", @@ -2560,8 +2497,11 @@ dependencies = [ "exit-future", "fork_choice", "futures", + "hash-db", + "hash256-std-hasher", "hex", "jsonwebtoken", + "keccak-hash", "lazy_static", "lighthouse_metrics", "lru 0.7.8", @@ -2583,6 +2523,7 @@ dependencies = [ "tokio-stream", "tree_hash", "tree_hash_derive", + "triehash", "types", "warp", "zeroize", @@ -2618,31 +2559,6 @@ dependencies = [ "instant", ] -[[package]] -name = "fastrlp" -version = 
"0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "089263294bb1c38ac73649a6ad563dd9a5142c8dc0482be15b8b9acb22a1611e" -dependencies = [ - "arrayvec", - "auto_impl", - "bytes", - "ethereum-types 0.13.1", - "fastrlp-derive", -] - -[[package]] -name = "fastrlp-derive" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6e454d03710df0cd95ce075d7731ce3fa35fb3779c15270cd491bc5f2ef9355" -dependencies = [ - "bytes", - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "ff" version = "0.12.1" @@ -2687,6 +2603,18 @@ dependencies = [ name = "fixed-hash" version = "0.7.0" source = "git+https://github.com/paritytech/parity-common?rev=df638ab0885293d21d656dc300d39236b69ce57d#df638ab0885293d21d656dc300d39236b69ce57d" +dependencies = [ + "byteorder", + "rand 0.8.5", + "rustc-hex", + "static_assertions", +] + +[[package]] +name = "fixed-hash" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" dependencies = [ "arbitrary", "byteorder", @@ -3057,6 +2985,21 @@ version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" +[[package]] +name = "hash-db" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d23bd4e7b5eda0d0f3a307e8b381fdc8ba9000f26fbe912250c0a4cc3956364a" + +[[package]] +name = "hash256-std-hasher" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92c171d55b98633f4ed3860808f004099b36c1cc29c42cfc53aa8591b21efcf2" +dependencies = [ + "crunchy", +] + [[package]] name = "hashbrown" version = "0.11.2" @@ -3525,6 +3468,15 @@ dependencies = [ "serde", ] +[[package]] +name = "impl-serde" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ebc88fc67028ae3db0c853baa36269d398d5f45b6982f95549ff5def78c935cd" +dependencies = [ + "serde", +] + [[package]] name = "impl-trait-for-tuples" version = "0.2.2" @@ -3694,6 +3646,16 @@ dependencies = [ "cpufeatures", ] +[[package]] +name = "keccak-hash" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b286e6b663fb926e1eeb68528e69cb70ed46c6d65871a21b2215ae8154c6d3c" +dependencies = [ + "primitive-types 0.12.1", + "tiny-keccak", +] + [[package]] name = "lazy_static" version = "1.4.0" @@ -4627,13 +4589,36 @@ name = "merkle_proof" version = "0.2.0" dependencies = [ "eth2_hashing", - "ethereum-types 0.12.1", + "ethereum-types 0.14.1", "lazy_static", "quickcheck", "quickcheck_macros", "safe_arith", ] +[[package]] +name = "metastruct" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "734788dec2091fe9afa39530ca2ea7994f4a2c9aff3dbfebb63f2c1945c6f10b" +dependencies = [ + "metastruct_macro", +] + +[[package]] +name = "metastruct_macro" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42ded15e7570c2a507a23e6c3a1c8d74507b779476e43afe93ddfc261d44173d" +dependencies = [ + "darling 0.13.4", + "itertools", + "proc-macro2", + "quote", + "smallvec", + "syn", +] + [[package]] name = "mev-build-rs" version = "0.2.1" @@ -4797,7 +4782,7 @@ version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d6d4752e6230d8ef7adf7bd5d8c4b1f6561c1014c5ba9a37445ccefe18aa1db" dependencies = [ - "proc-macro-crate 1.1.3", + "proc-macro-crate", "proc-macro-error", "proc-macro2", "quote", @@ -4952,7 +4937,7 @@ dependencies = [ "error-chain", "eth2_ssz", "eth2_ssz_types", - "ethereum-types 0.12.1", + "ethereum-types 0.14.1", "exit-future", "fnv", "futures", @@ -5209,6 +5194,31 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" +[[package]] +name = "open-fastrlp" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "786393f80485445794f6043fd3138854dd109cc6c4bd1a6383db304c9ce9b9ce" +dependencies = [ + "arrayvec", + "auto_impl", + "bytes", + "ethereum-types 0.14.1", + "open-fastrlp-derive", +] + +[[package]] +name = "open-fastrlp-derive" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "003b2be5c6c53c1cfeb0a238b8a1c3915cd410feb684457a36c10038f764bb1c" +dependencies = [ + "bytes", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "openssl" version = "0.10.44" @@ -5359,7 +5369,7 @@ version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1557010476e0595c9b568d16dcfb81b93cdeb157612726f5170d31aa707bed27" dependencies = [ - "proc-macro-crate 1.1.3", + "proc-macro-crate", "proc-macro2", "quote", "syn", @@ -5371,7 +5381,7 @@ version = "3.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9299338969a3d2f491d65f140b00ddec470858402f888af98e8642fb5e8965cd" dependencies = [ - "proc-macro-crate 1.1.3", + "proc-macro-crate", "proc-macro2", "quote", "syn", @@ -5679,35 +5689,27 @@ version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05e4722c697a58a99d5d06a08c30821d7c082a4632198de1eaa5a6c22ef42373" dependencies = [ - "fixed-hash", + "fixed-hash 0.7.0", "impl-codec 0.5.1", "impl-rlp", - "impl-serde", + "impl-serde 0.3.2", "uint", ] [[package]] name = "primitive-types" -version = "0.11.1" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e28720988bff275df1f51b171e1b2a18c30d194c4d2b61defdacecd625a5d94a" +checksum = "9f3486ccba82358b11a77516035647c34ba167dfa53312630de83b12bd4f3d66" dependencies = [ - "fixed-hash", + "fixed-hash 0.8.0", "impl-codec 0.6.0", "impl-rlp", - "impl-serde", + 
"impl-serde 0.4.0", + "scale-info", "uint", ] -[[package]] -name = "proc-macro-crate" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" -dependencies = [ - "toml", -] - [[package]] name = "proc-macro-crate" version = "1.1.3" @@ -5913,26 +5915,6 @@ dependencies = [ "unescape", ] -[[package]] -name = "ptr_meta" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0738ccf7ea06b608c10564b31debd4f5bc5e197fc8bfe088f68ae5ce81e7a4f1" -dependencies = [ - "ptr_meta_derive", -] - -[[package]] -name = "ptr_meta_derive" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "quick-error" version = "1.2.3" @@ -6215,15 +6197,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "rend" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79af64b4b6362ffba04eef3a4e10829718a4896dac19daa741851c86781edf95" -dependencies = [ - "bytecheck", -] - [[package]] name = "reqwest" version = "0.11.13" @@ -6303,31 +6276,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "rkyv" -version = "0.7.39" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cec2b3485b07d96ddfd3134767b8a447b45ea4eb91448d0a35180ec0ffd5ed15" -dependencies = [ - "bytecheck", - "hashbrown 0.12.3", - "ptr_meta", - "rend", - "rkyv_derive", - "seahash", -] - -[[package]] -name = "rkyv_derive" -version = "0.7.39" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6eaedadc88b53e36dd32d940ed21ae4d850d5916f2581526921f553a72ac34c4" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "rle-decode-fast" version = "1.0.3" @@ -6420,24 +6368,6 @@ dependencies = [ 
"smallvec", ] -[[package]] -name = "rust_decimal" -version = "1.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33c321ee4e17d2b7abe12b5d20c1231db708dd36185c8a21e9de5fed6da4dbe9" -dependencies = [ - "arrayvec", - "borsh", - "bytecheck", - "byteorder", - "bytes", - "num-traits", - "rand 0.8.5", - "rkyv", - "serde", - "serde_json", -] - [[package]] name = "rustc-demangle" version = "0.1.21" @@ -6577,6 +6507,30 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "scale-info" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "001cf62ece89779fd16105b5f515ad0e5cedcd5440d3dd806bb067978e7c3608" +dependencies = [ + "cfg-if", + "derive_more", + "parity-scale-codec 3.2.1", + "scale-info-derive", +] + +[[package]] +name = "scale-info-derive" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "303959cf613a6f6efd19ed4b4ad5bf79966a13352716299ad532cfb115f4205c" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "schannel" version = "0.1.20" @@ -6658,12 +6612,6 @@ dependencies = [ "url", ] -[[package]] -name = "seahash" -version = "4.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b" - [[package]] name = "sec1" version = "0.3.0" @@ -7466,7 +7414,7 @@ version = "0.2.0" dependencies = [ "criterion", "eth2_hashing", - "ethereum-types 0.12.1", + "ethereum-types 0.14.1", ] [[package]] @@ -8087,7 +8035,7 @@ dependencies = [ "eth2_hashing", "eth2_ssz", "eth2_ssz_derive", - "ethereum-types 0.12.1", + "ethereum-types 0.14.1", "rand 0.8.5", "smallvec", "tree_hash_derive", @@ -8103,6 +8051,16 @@ dependencies = [ "syn", ] +[[package]] +name = "triehash" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1631b201eb031b563d2e85ca18ec8092508e262a3196ce9bd10a67ec87b9f5c" 
+dependencies = [ + "hash-db", + "rlp", +] + [[package]] name = "trust-dns-proto" version = "0.22.0" @@ -8247,7 +8205,7 @@ dependencies = [ "eth2_ssz", "eth2_ssz_derive", "eth2_ssz_types", - "ethereum-types 0.12.1", + "ethereum-types 0.14.1", "hex", "int_to_bytes", "itertools", @@ -8255,6 +8213,7 @@ dependencies = [ "log", "maplit", "merkle_proof", + "metastruct", "parking_lot 0.12.1", "rand 0.8.5", "rand_xorshift", diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index c4c6966732d..cce2fbb971f 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -65,6 +65,8 @@ pub struct ChainConfig { /// Low values are useful for execution engines which don't improve their payload after the /// first call, and high values are useful for ensuring the EL is given ample notice. pub prepare_payload_lookahead: Duration, + /// Use EL-free optimistic sync for the finalized part of the chain. + pub optimistic_finalized_sync: bool, } impl Default for ChainConfig { @@ -89,6 +91,7 @@ impl Default for ChainConfig { count_unrealized_full: CountUnrealizedFull::default(), checkpoint_sync_url_timeout: 60, prepare_payload_lookahead: Duration::from_secs(4), + optimistic_finalized_sync: true, } } } diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 85f7629bb79..7435c3a8cc4 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -15,7 +15,7 @@ use crate::{ use execution_layer::{BuilderParams, PayloadStatus}; use fork_choice::{InvalidationOperation, PayloadVerificationStatus}; use proto_array::{Block as ProtoBlock, ExecutionStatus}; -use slog::debug; +use slog::{debug, warn}; use slot_clock::SlotClock; use state_processing::per_block_processing::{ compute_timestamp_at_slot, is_execution_enabled, is_merge_transition_complete, @@ -59,26 +59,46 @@ impl 
PayloadNotifier { state: &BeaconState, notify_execution_layer: NotifyExecutionLayer, ) -> Result> { - let payload_verification_status = match notify_execution_layer { - NotifyExecutionLayer::No => Some(PayloadVerificationStatus::Optimistic), - NotifyExecutionLayer::Yes => { - if is_execution_enabled(state, block.message().body()) { - // Perform the initial stages of payload verification. - // - // We will duplicate these checks again during `per_block_processing`, however these checks - // are cheap and doing them here ensures we protect the execution engine from junk. - partially_verify_execution_payload( - state, - block.slot(), - block.message().execution_payload()?, - &chain.spec, - ) - .map_err(BlockError::PerBlockProcessingError)?; - None - } else { - Some(PayloadVerificationStatus::Irrelevant) + let payload_verification_status = if is_execution_enabled(state, block.message().body()) { + // Perform the initial stages of payload verification. + // + // We will duplicate these checks again during `per_block_processing`, however these + // checks are cheap and doing them here ensures we have verified them before marking + // the block as optimistically imported. This is particularly relevant in the case + // where we do not send the block to the EL at all. + let block_message = block.message(); + let payload = block_message.execution_payload()?; + partially_verify_execution_payload(state, block.slot(), payload, &chain.spec) + .map_err(BlockError::PerBlockProcessingError)?; + + match notify_execution_layer { + NotifyExecutionLayer::No if chain.config.optimistic_finalized_sync => { + // Verify the block hash here in Lighthouse and immediately mark the block as + // optimistically imported. This saves a lot of roundtrips to the EL. 
+ let execution_layer = chain + .execution_layer + .as_ref() + .ok_or(ExecutionPayloadError::NoExecutionConnection)?; + + if let Err(e) = + execution_layer.verify_payload_block_hash(&payload.execution_payload) + { + warn!( + chain.log, + "Falling back to slow block hash verification"; + "block_number" => payload.block_number(), + "info" => "you can silence this warning with --disable-optimistic-finalized-sync", + "error" => ?e, + ); + None + } else { + Some(PayloadVerificationStatus::Optimistic) + } } + _ => None, } + } else { + Some(PayloadVerificationStatus::Irrelevant) }; Ok(Self { diff --git a/beacon_node/builder_client/Cargo.toml b/beacon_node/builder_client/Cargo.toml index c4d21c59ab8..48ac0300c98 100644 --- a/beacon_node/builder_client/Cargo.toml +++ b/beacon_node/builder_client/Cargo.toml @@ -9,4 +9,4 @@ reqwest = { version = "0.11.0", features = ["json","stream"] } sensitive_url = { path = "../../common/sensitive_url" } eth2 = { path = "../../common/eth2" } serde = { version = "1.0.116", features = ["derive"] } -serde_json = "1.0.58" \ No newline at end of file +serde_json = "1.0.58" diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index bfc748d5b62..76788b102e5 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -37,7 +37,7 @@ rand = "0.8.5" zeroize = { version = "1.4.2", features = ["zeroize_derive"] } lighthouse_metrics = { path = "../../common/lighthouse_metrics" } lazy_static = "1.4.0" -ethers-core = "0.17.0" +ethers-core = "1.0.2" builder_client = { path = "../builder_client" } fork_choice = { path = "../../consensus/fork_choice" } mev-build-rs = { git = "https://github.com/ralexstokes/mev-rs", rev = "6c99b0fbdc0427b1625469d2e575303ce08de5b8" } @@ -45,3 +45,7 @@ ethereum-consensus = { git = "https://github.com/ralexstokes/ethereum-consensus" ssz-rs = { git = "https://github.com/ralexstokes/ssz-rs", rev = "cb08f1" } tokio-stream = { version = "0.1.9", features 
= [ "sync" ] } strum = "0.24.0" +keccak-hash = "0.10.0" +hash256-std-hasher = "0.15.2" +triehash = "0.8.4" +hash-db = "0.15.2" diff --git a/beacon_node/execution_layer/src/block_hash.rs b/beacon_node/execution_layer/src/block_hash.rs new file mode 100644 index 00000000000..f023c038aec --- /dev/null +++ b/beacon_node/execution_layer/src/block_hash.rs @@ -0,0 +1,163 @@ +use crate::{ + keccak::{keccak256, KeccakHasher}, + metrics, Error, ExecutionLayer, +}; +use ethers_core::utils::rlp::RlpStream; +use keccak_hash::KECCAK_EMPTY_LIST_RLP; +use triehash::ordered_trie_root; +use types::{ + map_execution_block_header_fields, Address, EthSpec, ExecutionBlockHash, ExecutionBlockHeader, + ExecutionPayload, Hash256, Hash64, Uint256, +}; + +impl ExecutionLayer { + /// Verify `payload.block_hash` locally within Lighthouse. + /// + /// No remote calls to the execution client will be made, so this is quite a cheap check. + pub fn verify_payload_block_hash(&self, payload: &ExecutionPayload) -> Result<(), Error> { + let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_VERIFY_BLOCK_HASH); + + // Calculate the transactions root. + // We're currently using a deprecated Parity library for this. We should move to a + // better alternative when one appears, possibly following Reth. + let rlp_transactions_root = ordered_trie_root::( + payload.transactions.iter().map(|txn_bytes| &**txn_bytes), + ); + + // Construct the block header. + let exec_block_header = ExecutionBlockHeader::from_payload( + payload, + KECCAK_EMPTY_LIST_RLP.as_fixed_bytes().into(), + rlp_transactions_root, + ); + + // Hash the RLP encoding of the block header. 
+ let rlp_block_header = rlp_encode_block_header(&exec_block_header); + let header_hash = ExecutionBlockHash::from_root(keccak256(&rlp_block_header)); + + if header_hash != payload.block_hash { + return Err(Error::BlockHashMismatch { + computed: header_hash, + payload: payload.block_hash, + transactions_root: rlp_transactions_root, + }); + } + + Ok(()) + } +} + +/// RLP encode an execution block header. +pub fn rlp_encode_block_header(header: &ExecutionBlockHeader) -> Vec { + let mut rlp_header_stream = RlpStream::new(); + rlp_header_stream.begin_unbounded_list(); + map_execution_block_header_fields!(&header, |_, field| { + rlp_header_stream.append(field); + }); + rlp_header_stream.finalize_unbounded_list(); + rlp_header_stream.out().into() +} + +#[cfg(test)] +mod test { + use super::*; + use hex::FromHex; + use std::str::FromStr; + + fn test_rlp_encoding( + header: &ExecutionBlockHeader, + expected_rlp: Option<&str>, + expected_hash: Hash256, + ) { + let rlp_encoding = rlp_encode_block_header(header); + + if let Some(expected_rlp) = expected_rlp { + let computed_rlp = hex::encode(&rlp_encoding); + assert_eq!(expected_rlp, computed_rlp); + } + + let computed_hash = keccak256(&rlp_encoding); + assert_eq!(expected_hash, computed_hash); + } + + #[test] + fn test_rlp_encode_eip1559_block() { + let header = ExecutionBlockHeader { + parent_hash: Hash256::from_str("e0a94a7a3c9617401586b1a27025d2d9671332d22d540e0af72b069170380f2a").unwrap(), + ommers_hash: Hash256::from_str("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347").unwrap(), + beneficiary: Address::from_str("ba5e000000000000000000000000000000000000").unwrap(), + state_root: Hash256::from_str("ec3c94b18b8a1cff7d60f8d258ec723312932928626b4c9355eb4ab3568ec7f7").unwrap(), + transactions_root: Hash256::from_str("50f738580ed699f0469702c7ccc63ed2e51bc034be9479b7bff4e68dee84accf").unwrap(), + receipts_root: Hash256::from_str("29b0562f7140574dd0d50dee8a271b22e1a0a7b78fca58f7c60370d8317ba2a9").unwrap(), + 
logs_bloom: <[u8; 256]>::from_hex("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap().into(), + difficulty: 0x020000.into(), + number: 0x01_u64.into(), + gas_limit: 0x016345785d8a0000_u64.into(), + gas_used: 0x015534_u64.into(), + timestamp: 0x079e, + extra_data: vec![0x42], + mix_hash: Hash256::from_str("0000000000000000000000000000000000000000000000000000000000000000").unwrap(), + nonce: Hash64::zero(), + base_fee_per_gas: 0x036b_u64.into(), + }; + let expected_rlp = "f90200a0e0a94a7a3c9617401586b1a27025d2d9671332d22d540e0af72b069170380f2aa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ba5e000000000000000000000000000000000000a0ec3c94b18b8a1cff7d60f8d258ec723312932928626b4c9355eb4ab3568ec7f7a050f738580ed699f0469702c7ccc63ed2e51bc034be9479b7bff4e68dee84accfa029b0562f7140574dd0d50dee8a271b22e1a0a7b78fca58f7c60370d8317ba2a9b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830200000188016345785d8a00008301553482079e42a0000000000000000000000000000000000000000000000000000000000000000088000000000000000082036b"; + let 
expected_hash = + Hash256::from_str("6a251c7c3c5dca7b42407a3752ff48f3bbca1fab7f9868371d9918daf1988d1f") + .unwrap(); + test_rlp_encoding(&header, Some(expected_rlp), expected_hash); + } + + #[test] + fn test_rlp_encode_merge_block() { + let header = ExecutionBlockHeader { + parent_hash: Hash256::from_str("927ca537f06c783a3a2635b8805eef1c8c2124f7444ad4a3389898dd832f2dbe").unwrap(), + ommers_hash: Hash256::from_str("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347").unwrap(), + beneficiary: Address::from_str("ba5e000000000000000000000000000000000000").unwrap(), + state_root: Hash256::from_str("0xe97859b065bd8dbbb4519c7cb935024de2484c2b7f881181b4360492f0b06b82").unwrap(), + transactions_root: Hash256::from_str("50f738580ed699f0469702c7ccc63ed2e51bc034be9479b7bff4e68dee84accf").unwrap(), + receipts_root: Hash256::from_str("29b0562f7140574dd0d50dee8a271b22e1a0a7b78fca58f7c60370d8317ba2a9").unwrap(), + logs_bloom: <[u8; 256]>::from_hex("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap().into(), + difficulty: 0x00.into(), + number: 0x01_u64.into(), + gas_limit: 0x016345785d8a0000_u64.into(), + gas_used: 0x015534_u64.into(), + timestamp: 0x079e, + extra_data: vec![0x42], + mix_hash: Hash256::from_str("0000000000000000000000000000000000000000000000000000000000020000").unwrap(), + nonce: Hash64::zero(), + base_fee_per_gas: 0x036b_u64.into(), + }; + let expected_rlp = 
"f901fda0927ca537f06c783a3a2635b8805eef1c8c2124f7444ad4a3389898dd832f2dbea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ba5e000000000000000000000000000000000000a0e97859b065bd8dbbb4519c7cb935024de2484c2b7f881181b4360492f0b06b82a050f738580ed699f0469702c7ccc63ed2e51bc034be9479b7bff4e68dee84accfa029b0562f7140574dd0d50dee8a271b22e1a0a7b78fca58f7c60370d8317ba2a9b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800188016345785d8a00008301553482079e42a0000000000000000000000000000000000000000000000000000000000002000088000000000000000082036b"; + let expected_hash = + Hash256::from_str("0x5b1f0f2efdaa19e996b4aea59eeb67620259f09732732a339a10dac311333684") + .unwrap(); + test_rlp_encoding(&header, Some(expected_rlp), expected_hash); + } + + // Test a real payload from mainnet. 
+ #[test] + fn test_rlp_encode_block_16182891() { + let header = ExecutionBlockHeader { + parent_hash: Hash256::from_str("3e9c7b3f403947f110f68c4564a004b73dd8ebf73b143e46cc637926eec01a6d").unwrap(), + ommers_hash: Hash256::from_str("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347").unwrap(), + beneficiary: Address::from_str("dafea492d9c6733ae3d56b7ed1adb60692c98bc5").unwrap(), + state_root: Hash256::from_str("5a8183d230818a167477420ce3a393ca3ef8706a7d596694ab6059894ed6fda9").unwrap(), + transactions_root: Hash256::from_str("0223f0cb35f184d2ac409e89dc0768ad738f777bd1c85d3302ca50f307180c94").unwrap(), + receipts_root: Hash256::from_str("371c76821b1cc21232574604eac5349d51647eb530e2a45d4f6fe2c501351aa5").unwrap(), + logs_bloom: <[u8; 256]>::from_hex("1a2c559955848d2662a0634cb40c7a6192a1524f11061203689bcbcdec901b054084d4f4d688009d24c10918e0089b48e72fe2d7abafb903889d10c3827c6901096612d259801b1b7ba1663a4201f5f88f416a9997c55bcc2c54785280143b057a008764c606182e324216822a2d5913e797a05c16cc1468d001acf3783b18e00e0203033e43106178db554029e83ca46402dc49d929d7882a04a0e7215041bdabf7430bd10ef4bb658a40f064c63c4816660241c2480862f26742fdf9ca41637731350301c344e439428182a03e384484e6d65d0c8a10117c6739ca201b60974519a1ae6b0c3966c0f650b449d10eae065dab2c83ab4edbab5efdea50bbc801").unwrap().into(), + difficulty: 0.into(), + number: 16182891.into(), + gas_limit: 0x1c9c380.into(), + gas_used: 0xe9b752.into(), + timestamp: 0x6399bf63, + extra_data: hex::decode("496c6c756d696e61746520446d6f63726174697a6520447374726962757465").unwrap(), + mix_hash: Hash256::from_str("bf5289894b2ceab3549f92f063febbac896b280ddb18129a57cff13113c11b13").unwrap(), + nonce: Hash64::zero(), + base_fee_per_gas: 0x34187b238_u64.into(), + }; + let expected_hash = + Hash256::from_str("6da69709cd5a34079b6604d29cd78fc01dacd7c6268980057ad92a2bede87351") + .unwrap(); + test_rlp_encoding(&header, None, expected_hash); + } +} diff --git a/beacon_node/execution_layer/src/keccak.rs 
b/beacon_node/execution_layer/src/keccak.rs new file mode 100644 index 00000000000..c4c96892728 --- /dev/null +++ b/beacon_node/execution_layer/src/keccak.rs @@ -0,0 +1,35 @@ +// Copyright 2017, 2018 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +use hash256_std_hasher::Hash256StdHasher; +use hash_db::Hasher; +use types::Hash256; + +pub fn keccak256(bytes: &[u8]) -> Hash256 { + Hash256::from(ethers_core::utils::keccak256(bytes)) +} + +/// Keccak hasher. +#[derive(Default, Debug, Clone, PartialEq)] +pub struct KeccakHasher; + +impl Hasher for KeccakHasher { + type Out = Hash256; + type StdHasher = Hash256StdHasher; + + const LENGTH: usize = 32; + + fn hash(x: &[u8]) -> Self::Out { + keccak256(x) + } +} diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index ec1415f80bb..a4d15abb364 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -40,8 +40,10 @@ use types::{ ProposerPreparationData, PublicKeyBytes, Signature, SignedBeaconBlock, Slot, Uint256, }; +mod block_hash; mod engine_api; mod engines; +mod keccak; mod metrics; pub mod payload_cache; mod payload_status; @@ -90,6 +92,11 @@ pub enum Error { ShuttingDown, FeeRecipientUnspecified, MissingLatestValidHash, + BlockHashMismatch { + computed: ExecutionBlockHash, + payload: ExecutionBlockHash, + transactions_root: Hash256, + }, InvalidJWTSecret(String), } diff --git 
a/beacon_node/execution_layer/src/metrics.rs b/beacon_node/execution_layer/src/metrics.rs index bb5a1088d1a..287050f66be 100644 --- a/beacon_node/execution_layer/src/metrics.rs +++ b/beacon_node/execution_layer/src/metrics.rs @@ -45,6 +45,11 @@ lazy_static::lazy_static! { "execution_layer_get_payload_by_block_hash_time", "Time to reconstruct a payload from the EE using eth_getBlockByHash" ); + pub static ref EXECUTION_LAYER_VERIFY_BLOCK_HASH: Result = try_create_histogram_with_buckets( + "execution_layer_verify_block_hash_time", + "Time to verify the execution block hash in Lighthouse, without the EL", + Ok(vec![10e-6, 50e-6, 100e-6, 500e-6, 1e-3, 5e-3, 10e-3, 50e-3, 100e-3, 500e-3]), + ); pub static ref EXECUTION_LAYER_PAYLOAD_STATUS: Result = try_create_int_counter_vec( "execution_layer_payload_status", "Indicates the payload status returned for a particular method", diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 2e7b2227b2f..b1d928eecb9 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -44,7 +44,7 @@ strum = "0.24.0" tokio-util = { version = "0.6.3", features = ["time"] } derivative = "2.2.0" delay_map = "0.1.1" -ethereum-types = { version = "0.12.1", optional = true } +ethereum-types = { version = "0.14.1", optional = true } [features] deterministic_long_lived_attnets = [ "ethereum-types" ] diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index c5f4cc8adf4..915872e2b5e 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -900,6 +900,13 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Useful if you intend to run a non-validating beacon node.") .takes_value(false) ) + .arg( + Arg::with_name("disable-optimistic-finalized-sync") + .long("disable-optimistic-finalized-sync") + .help("Force Lighthouse to verify every execution block hash with the execution \ + client during finalized sync. 
By default block hashes will be checked in \ + Lighthouse and only passed to the EL if initial verification fails.") + ) .arg( Arg::with_name("light-client-server") .long("light-client-server") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index b3bfa696edf..241a5952eec 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -741,6 +741,10 @@ pub fn get_config( client_config.validator_monitor_auto = true; } + // Optimistic finalized sync. + client_config.chain.optimistic_finalized_sync = + !cli_args.is_present("disable-optimistic-finalized-sync"); + Ok(client_config) } diff --git a/common/clap_utils/Cargo.toml b/common/clap_utils/Cargo.toml index b370eb0825b..62eb8aa3d5d 100644 --- a/common/clap_utils/Cargo.toml +++ b/common/clap_utils/Cargo.toml @@ -12,7 +12,7 @@ hex = "0.4.2" dirs = "3.0.1" eth2_network_config = { path = "../eth2_network_config" } eth2_ssz = "0.4.1" -ethereum-types = "0.12.1" +ethereum-types = "0.14.1" serde = "1.0.116" serde_json = "1.0.59" serde_yaml = "0.8.13" diff --git a/consensus/cached_tree_hash/Cargo.toml b/consensus/cached_tree_hash/Cargo.toml index f9433e4a496..cd03b37a448 100644 --- a/consensus/cached_tree_hash/Cargo.toml +++ b/consensus/cached_tree_hash/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Michael Sproul "] edition = "2021" [dependencies] -ethereum-types = "0.12.1" +ethereum-types = "0.14.1" eth2_ssz_types = "0.2.2" eth2_hashing = "0.3.0" eth2_ssz_derive = "0.3.0" diff --git a/consensus/merkle_proof/Cargo.toml b/consensus/merkle_proof/Cargo.toml index 7400d4f54d8..2c0dbf1a758 100644 --- a/consensus/merkle_proof/Cargo.toml +++ b/consensus/merkle_proof/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Michael Sproul "] edition = "2021" [dependencies] -ethereum-types = "0.12.1" +ethereum-types = "0.14.1" eth2_hashing = "0.3.0" lazy_static = "1.4.0" safe_arith = { path = "../safe_arith" } diff --git a/consensus/serde_utils/Cargo.toml b/consensus/serde_utils/Cargo.toml index 1d258ec6efd..d4ba02765fb 100644 --- 
a/consensus/serde_utils/Cargo.toml +++ b/consensus/serde_utils/Cargo.toml @@ -11,4 +11,4 @@ serde = { version = "1.0.116", features = ["derive"] } serde_derive = "1.0.116" serde_json = "1.0.58" hex = "0.4.2" -ethereum-types = "0.12.1" +ethereum-types = "0.14.1" diff --git a/consensus/ssz/Cargo.toml b/consensus/ssz/Cargo.toml index a153c2efc14..1eef2a55571 100644 --- a/consensus/ssz/Cargo.toml +++ b/consensus/ssz/Cargo.toml @@ -13,7 +13,7 @@ name = "ssz" eth2_ssz_derive = "0.3.0" [dependencies] -ethereum-types = "0.12.1" +ethereum-types = "0.14.1" smallvec = { version = "1.6.1", features = ["const_generics"] } itertools = "0.10.3" diff --git a/consensus/swap_or_not_shuffle/Cargo.toml b/consensus/swap_or_not_shuffle/Cargo.toml index 1c7ad5f02a9..9a7d58b77d9 100644 --- a/consensus/swap_or_not_shuffle/Cargo.toml +++ b/consensus/swap_or_not_shuffle/Cargo.toml @@ -13,7 +13,7 @@ criterion = "0.3.3" [dependencies] eth2_hashing = "0.3.0" -ethereum-types = "0.12.1" +ethereum-types = "0.14.1" [features] arbitrary = ["ethereum-types/arbitrary"] diff --git a/consensus/tree_hash/Cargo.toml b/consensus/tree_hash/Cargo.toml index ab080eac065..731e2f177e5 100644 --- a/consensus/tree_hash/Cargo.toml +++ b/consensus/tree_hash/Cargo.toml @@ -15,7 +15,7 @@ eth2_ssz = "0.4.1" eth2_ssz_derive = "0.3.0" [dependencies] -ethereum-types = "0.12.1" +ethereum-types = "0.14.1" eth2_hashing = "0.3.0" smallvec = "1.6.1" diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index 1ccc8dba8b9..6ae185f7fff 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -14,7 +14,7 @@ bls = { path = "../../crypto/bls" } compare_fields = { path = "../../common/compare_fields" } compare_fields_derive = { path = "../../common/compare_fields_derive" } eth2_interop_keypairs = { path = "../../common/eth2_interop_keypairs" } -ethereum-types = "0.12.1" +ethereum-types = "0.14.1" eth2_hashing = "0.3.0" hex = "0.4.2" int_to_bytes = { path = "../int_to_bytes" } @@ -45,6 +45,7 @@ 
lazy_static = "1.4.0" parking_lot = "0.12.0" itertools = "0.10.0" superstruct = "0.5.0" +metastruct = "0.1.0" serde_json = "1.0.74" smallvec = "1.8.0" serde_with = "1.13.0" diff --git a/consensus/types/src/execution_block_header.rs b/consensus/types/src/execution_block_header.rs new file mode 100644 index 00000000000..4baa5dd3958 --- /dev/null +++ b/consensus/types/src/execution_block_header.rs @@ -0,0 +1,74 @@ +// Copyright (c) 2022 Reth Contributors +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. +use crate::{Address, EthSpec, ExecutionPayload, Hash256, Hash64, Uint256}; +use metastruct::metastruct; + +/// Execution block header as used for RLP encoding and Keccak hashing. +/// +/// Credit to Reth for the type definition. 
+#[derive(Debug, Clone, PartialEq, Eq, Hash)] +#[metastruct(mappings(map_execution_block_header_fields()))] +pub struct ExecutionBlockHeader { + pub parent_hash: Hash256, + pub ommers_hash: Hash256, + pub beneficiary: Address, + pub state_root: Hash256, + pub transactions_root: Hash256, + pub receipts_root: Hash256, + pub logs_bloom: Vec, + pub difficulty: Uint256, + pub number: Uint256, + pub gas_limit: Uint256, + pub gas_used: Uint256, + pub timestamp: u64, + pub extra_data: Vec, + pub mix_hash: Hash256, + pub nonce: Hash64, + pub base_fee_per_gas: Uint256, +} + +impl ExecutionBlockHeader { + pub fn from_payload( + payload: &ExecutionPayload, + rlp_empty_list_root: Hash256, + rlp_transactions_root: Hash256, + ) -> Self { + // Most of these field mappings are defined in EIP-3675 except for `mixHash`, which is + // defined in EIP-4399. + ExecutionBlockHeader { + parent_hash: payload.parent_hash.into_root(), + ommers_hash: rlp_empty_list_root, + beneficiary: payload.fee_recipient, + state_root: payload.state_root, + transactions_root: rlp_transactions_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom.clone().into(), + difficulty: Uint256::zero(), + number: payload.block_number.into(), + gas_limit: payload.gas_limit.into(), + gas_used: payload.gas_used.into(), + timestamp: payload.timestamp, + extra_data: payload.extra_data.clone().into(), + mix_hash: payload.prev_randao, + nonce: Hash64::zero(), + base_fee_per_gas: payload.base_fee_per_gas, + } + } +} diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 37bab8b4806..87f5ebe8b3c 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -74,6 +74,7 @@ pub mod voluntary_exit; #[macro_use] pub mod slot_epoch_macros; pub mod config_and_preset; +pub mod execution_block_header; pub mod fork_context; pub mod participation_flags; pub mod participation_list; @@ -127,6 +128,7 @@ pub use crate::enr_fork_id::EnrForkId; pub use crate::eth1_data::Eth1Data; pub 
use crate::eth_spec::EthSpecId; pub use crate::execution_block_hash::ExecutionBlockHash; +pub use crate::execution_block_header::ExecutionBlockHeader; pub use crate::execution_payload::{ExecutionPayload, Transaction, Transactions}; pub use crate::execution_payload_header::ExecutionPayloadHeader; pub use crate::fork::Fork; @@ -179,6 +181,7 @@ pub type Hash256 = H256; pub type Uint256 = ethereum_types::U256; pub type Address = H160; pub type ForkVersion = [u8; 4]; +pub type Hash64 = ethereum_types::H64; pub use bls::{ AggregatePublicKey, AggregateSignature, Keypair, PublicKey, PublicKeyBytes, SecretKey, diff --git a/crypto/bls/Cargo.toml b/crypto/bls/Cargo.toml index 9ac468d2272..c3331824d9e 100644 --- a/crypto/bls/Cargo.toml +++ b/crypto/bls/Cargo.toml @@ -14,7 +14,7 @@ serde_derive = "1.0.116" eth2_serde_utils = "0.1.1" hex = "0.4.2" eth2_hashing = "0.3.0" -ethereum-types = "0.12.1" +ethereum-types = "0.14.1" arbitrary = { version = "1.0", features = ["derive"], optional = true } zeroize = { version = "1.4.2", features = ["zeroize_derive"] } blst = { version = "0.3.3", optional = true } diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 07c583da5cb..bd96885c68e 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -1697,3 +1697,22 @@ fn gui_flag() { assert!(config.validator_monitor_auto); }); } + +#[test] +fn optimistic_finalized_sync_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert!(config.chain.optimistic_finalized_sync); + }); +} + +#[test] +fn disable_optimistic_finalized_sync() { + CommandLineTest::new() + .flag("disable-optimistic-finalized-sync", None) + .run_with_zero_port() + .with_config(|config| { + assert!(!config.chain.optimistic_finalized_sync); + }); +} diff --git a/testing/ef_tests/Cargo.toml b/testing/ef_tests/Cargo.toml index 04a222c7af2..d969c9727d7 100644 --- a/testing/ef_tests/Cargo.toml +++ b/testing/ef_tests/Cargo.toml @@ -15,7 
+15,7 @@ bls = { path = "../../crypto/bls", default-features = false } compare_fields = { path = "../../common/compare_fields" } compare_fields_derive = { path = "../../common/compare_fields_derive" } derivative = "2.1.1" -ethereum-types = "0.12.1" +ethereum-types = "0.14.1" hex = "0.4.2" rayon = "1.4.1" serde = "1.0.116" diff --git a/testing/execution_engine_integration/Cargo.toml b/testing/execution_engine_integration/Cargo.toml index a85138be95b..26b5f596f22 100644 --- a/testing/execution_engine_integration/Cargo.toml +++ b/testing/execution_engine_integration/Cargo.toml @@ -15,8 +15,8 @@ execution_layer = { path = "../../beacon_node/execution_layer" } sensitive_url = { path = "../../common/sensitive_url" } types = { path = "../../consensus/types" } unused_port = { path = "../../common/unused_port" } -ethers-core = "0.17.0" -ethers-providers = "0.17.0" +ethers-core = "1.0.2" +ethers-providers = "1.0.2" deposit_contract = { path = "../../common/deposit_contract" } reqwest = { version = "0.11.0", features = ["json"] } hex = "0.4.2" diff --git a/testing/node_test_rig/Cargo.toml b/testing/node_test_rig/Cargo.toml index 2c9bd5939f8..ea5d005c16a 100644 --- a/testing/node_test_rig/Cargo.toml +++ b/testing/node_test_rig/Cargo.toml @@ -13,4 +13,4 @@ eth2 = { path = "../../common/eth2" } validator_client = { path = "../../validator_client" } validator_dir = { path = "../../common/validator_dir", features = ["insecure_keys"] } sensitive_url = { path = "../../common/sensitive_url" } -execution_layer = { path = "../../beacon_node/execution_layer" } \ No newline at end of file +execution_layer = { path = "../../beacon_node/execution_layer" } From 168a7805c3aa387cbdb05ff066367011facf9655 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 9 Jan 2023 05:12:31 +0000 Subject: [PATCH 16/23] Add more Gnosis bootnodes (#3855) ## Proposed Changes Add the latest long-running Gnosis chain bootnodes provided to us by the Gnosis team. 
--- .../built_in_network_configs/gnosis/boot_enr.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/common/eth2_network_config/built_in_network_configs/gnosis/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/gnosis/boot_enr.yaml index 130c1fa1c32..e5896988c92 100644 --- a/common/eth2_network_config/built_in_network_configs/gnosis/boot_enr.yaml +++ b/common/eth2_network_config/built_in_network_configs/gnosis/boot_enr.yaml @@ -3,3 +3,7 @@ - enr:-Ly4QBf76jLiCA_pDXoZjhyRbuwzFOscFY-MIKkPnmHPQbvaKhIDZutfe38G9ibzgQP0RKrTo3vcWOy4hf_8wOZ-U5MBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCCS-QxAgAAZP__________gmlkgnY0gmlwhBLGgjaJc2VjcDI1NmsxoQLGeo0Q4lDvxIjHjnkAqEuETTaFIjsNrEcSpdDhcHXWFYhzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA - enr:-Ly4QLjZUWdqUO_RwyDqCAccIK5-MbLRD6A2c7oBuVbBgBnWDkEf0UKJVAaJqi2pO101WVQQLYSnYgz1Q3pRhYdrlFoBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCCS-QxAgAAZP__________gmlkgnY0gmlwhANA8sSJc2VjcDI1NmsxoQK4TC_EK1jSs0VVPUpOjIo1rhJmff2SLBPFOWSXMwdLVYhzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA - enr:-Ly4QKwX2rTFtKWKQHSGQFhquxsxL1jewO8JB1MG-jgHqAZVFWxnb3yMoQqnYSV1bk25-_jiLuhIulxar3RBWXEDm6EBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCCS-QxAgAAZP__________gmlkgnY0gmlwhAN-qZeJc2VjcDI1NmsxoQI7EPGMpecl0QofLp4Wy_lYNCCChUFEH6kY7k-oBGkPFIhzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA +- enr:-Ly4QPoChSQTleJROee1-k-4HOEgKqL9kLksE-tEiVqcY9kwF9V53aBg-MruD7Yx4Aks3LAeJpKXAS4ntMrIdqvQYc8Ch2F0dG5ldHOIAAAAAAAAAACEZXRoMpCCS-QxAgAAZP__________gmlkgnY0gmlwhGsWBHiJc2VjcDI1NmsxoQKwGQrwOSBJB_DtQOkFZVAY4YQfMAbUVxFpL5WgrzEddYhzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA +- enr:-Ly4QBbaKRSX4SncCOxTTL611Kxlz-zYFrIn-k_63jGIPK_wbvFghVUHJICPCxufgTX5h79jvgfPr-2hEEQEdziGQ5MCh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCCS-QxAgAAZP__________gmlkgnY0gmlwhAMazo6Jc2VjcDI1NmsxoQKt-kbM9isuWp8djhyEq6-4MLv1Sy7dOXeMOMdPgwu9LohzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA +- 
enr:-Ly4QKJ5BzgFyJ6BaTlGY0C8ROzl508U3GA6qxdG5Gn2hxdke6nQO187pYlLvhp82Dez4PQn436Fts1F0WAm-_5l2LACh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCCS-QxAgAAZP__________gmlkgnY0gmlwhA-YLVKJc2VjcDI1NmsxoQI8_Lvr6p_TkcAu8KorKacfUEnoOon0tdO0qWhriPdBP4hzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA +- enr:-Ly4QJMtoiX2bPnVbiQOJCLbtUlqdqZk7kCJQln_W1bp1vOHcxWowE-iMXkKC4_uOb0o73wAW71WYi80Dlsg-7a5wiICh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCCS-QxAgAAZP__________gmlkgnY0gmlwhDbP3KmJc2VjcDI1NmsxoQNvcfKYUqcemLFlpKxl7JcQJwQ3L9unYL44gY2aEiRnI4hzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA From 830efdb5c2723209030e83378b2d8e44f9a6b97f Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 9 Jan 2023 08:18:55 +0000 Subject: [PATCH 17/23] Improve validator monitor experience for high validator counts (#3728) ## Issue Addressed NA ## Proposed Changes Myself and others (#3678) have observed that when running with lots of validators (e.g., 1000s) the cardinality is too much for Prometheus. I've seen Prometheus instances just grind to a halt when we turn the validator monitor on for our testnet validators (we have 10,000s of Goerli validators). Additionally, the debug log volume can get very high with one log per validator, per attestation. To address this, the `bn --validator-monitor-individual-tracking-threshold ` flag has been added to *disable* per-validator (i.e., non-aggregated) metrics/logging once the validator monitor exceeds the threshold of validators. The default value is `64`, which is a finger-to-the-wind value. I don't actually know the value at which Prometheus starts to become overwhelmed, but I've seen it work with ~64 validators and I've seen it *not* work with 1000s of validators. A default of `64` seems like it will result in a breaking change to users who are running millions of dollars worth of validators whilst resulting in a no-op for low-validator-count users. I'm open to changing this number, though. 
Additionally, this PR starts collecting aggregated Prometheus metrics (e.g., total count of head hits across all validators), so that high-validator-count validators still have some interesting metrics. We already had logging for aggregated values, so nothing has been added there. I've opted to make this a breaking change since it can be rather damaging to your Prometheus instance to accidentally enable the validator monitor with large numbers of validators. I've crashed a Prometheus instance myself and had a report from another user who's done the same thing. ## Additional Info NA ## Breaking Changes Note A new label has been added to the validator monitor Prometheus metrics: `total`. This label tracks the aggregated metrics of all validators in the validator monitor (as opposed to each validator being tracking individually using its pubkey as the label). Additionally, a new flag has been added to the Beacon Node: `--validator-monitor-individual-tracking-threshold`. The default value is `64`, which means that when the validator monitor is tracking more than 64 validators then it will stop tracking per-validator metrics and only track the `all_validators` metric. It will also stop logging per-validator logs and only emit aggregated logs (the exception being that exit and slashing logs are always emitted). These changes were introduced in #3728 to address issues with untenable Prometheus cardinality and log volume when using the validator monitor with high validator counts (e.g., 1000s of validators). Users with less than 65 validators will see no change in behavior (apart from the added `all_validators` metric). Users with more than 65 validators who wish to maintain the previous behavior can set something like `--validator-monitor-individual-tracking-threshold 999999`. 
--- beacon_node/beacon_chain/src/builder.rs | 10 +- beacon_node/beacon_chain/src/test_utils.rs | 3 +- .../beacon_chain/src/validator_monitor.rs | 760 +++++++++++------- beacon_node/beacon_chain/tests/store_tests.rs | 3 +- beacon_node/client/src/builder.rs | 1 + beacon_node/client/src/config.rs | 7 + .../network/src/subnet_service/tests/mod.rs | 3 +- beacon_node/src/cli.rs | 11 + beacon_node/src/config.rs | 6 + lighthouse/tests/beacon_node.rs | 25 + 10 files changed, 522 insertions(+), 307 deletions(-) diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index eff50701d7a..48419d46edb 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -579,11 +579,13 @@ where mut self, auto_register: bool, validators: Vec, + individual_metrics_threshold: usize, log: Logger, ) -> Self { self.validator_monitor = Some(ValidatorMonitor::new( validators, auto_register, + individual_metrics_threshold, log.clone(), )); self @@ -989,6 +991,7 @@ fn descriptive_db_error(item: &str, error: &StoreError) -> String { #[cfg(test)] mod test { use super::*; + use crate::validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD; use eth2_hashing::hash; use genesis::{ generate_deterministic_keypairs, interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH, @@ -1045,7 +1048,12 @@ mod test { .testing_slot_clock(Duration::from_secs(1)) .expect("should configure testing slot clock") .shutdown_sender(shutdown_tx) - .monitor_validators(true, vec![], log.clone()) + .monitor_validators( + true, + vec![], + DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, + log.clone(), + ) .build() .expect("should build"); diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 9183583fb1b..66de3f02d23 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -2,6 +2,7 @@ pub use crate::persisted_beacon_chain::PersistedBeaconChain; pub use crate::{ 
beacon_chain::{BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY}, migrate::MigratorConfig, + validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, BeaconChainError, NotifyExecutionLayer, ProduceBlockVerification, }; use crate::{ @@ -472,7 +473,7 @@ where log.clone(), 5, ))) - .monitor_validators(true, vec![], log); + .monitor_validators(true, vec![], DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, log); builder = if let Some(mutator) = self.initial_mutator { mutator(builder) diff --git a/beacon_node/beacon_chain/src/validator_monitor.rs b/beacon_node/beacon_chain/src/validator_monitor.rs index e95394bb78d..dad5e1517ad 100644 --- a/beacon_node/beacon_chain/src/validator_monitor.rs +++ b/beacon_node/beacon_chain/src/validator_monitor.rs @@ -21,10 +21,21 @@ use types::{ SignedContributionAndProof, Slot, SyncCommitteeMessage, VoluntaryExit, }; +/// Used for Prometheus labels. +/// +/// We've used `total` for this value to align with Nimbus, as per: +/// https://github.com/sigp/lighthouse/pull/3728#issuecomment-1375173063 +const TOTAL_LABEL: &str = "total"; + /// The validator monitor collects per-epoch data about each monitored validator. Historical data /// will be kept around for `HISTORIC_EPOCHS` before it is pruned. pub const HISTORIC_EPOCHS: usize = 4; +/// Once the validator monitor reaches this number of validators it will stop +/// tracking their metrics/logging individually in an effort to reduce +/// Prometheus cardinality and log volume. +pub const DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD: usize = 64; + #[derive(Debug)] pub enum Error { InvalidPubkey(String), @@ -258,16 +269,27 @@ pub struct ValidatorMonitor { indices: HashMap, /// If true, allow the automatic registration of validators. auto_register: bool, + /// Once the number of monitored validators goes above this threshold, we + /// will stop tracking metrics/logs on a per-validator basis. 
This prevents + /// large validator counts causing infeasibly high cardinailty for + /// Prometheus and high log volumes. + individual_tracking_threshold: usize, log: Logger, _phantom: PhantomData, } impl ValidatorMonitor { - pub fn new(pubkeys: Vec, auto_register: bool, log: Logger) -> Self { + pub fn new( + pubkeys: Vec, + auto_register: bool, + individual_tracking_threshold: usize, + log: Logger, + ) -> Self { let mut s = Self { validators: <_>::default(), indices: <_>::default(), auto_register, + individual_tracking_threshold, log, _phantom: PhantomData, }; @@ -277,6 +299,13 @@ impl ValidatorMonitor { s } + /// Returns `true` when the validator count is sufficiently low enough to + /// emit metrics and logs on a per-validator basis (rather than just an + /// aggregated basis). + fn individual_tracking(&self) -> bool { + self.validators.len() <= self.individual_tracking_threshold + } + /// Add some validators to `self` for additional monitoring. fn add_validator_pubkey(&mut self, pubkey: PublicKeyBytes) { let index_opt = self @@ -317,6 +346,12 @@ impl ValidatorMonitor { for monitored_validator in self.validators.values() { if let Some(i) = monitored_validator.index { monitored_validator.touch_epoch_summary(current_epoch); + + // Only log the per-validator metrics if it's enabled. + if !self.individual_tracking() { + continue; + } + let i = i as usize; let id = &monitored_validator.id; @@ -379,6 +414,24 @@ impl ValidatorMonitor { } } + /// Run `func` with the `TOTAL_LABEL` and optionally the + /// `individual_id`. + /// + /// This function is used for registering metrics that can be applied to + /// both all validators and an indivdual validator. For example, the count + /// of missed head votes can be aggregated across all validators in a single + /// metric and also tracked on a per-validator basis. + /// + /// We allow disabling tracking metrics on an individual validator basis + /// since it can result in untenable cardinality with high validator counts. 
+ fn aggregatable_metric(&self, individual_id: &str, func: F) { + func(TOTAL_LABEL); + + if self.individual_tracking() { + func(individual_id); + } + } + pub fn process_validator_statuses( &self, epoch: Epoch, @@ -431,72 +484,92 @@ impl ValidatorMonitor { // For Base states, this will be *any* attestation whatsoever. For Altair states, // this will be any attestation that matched a "timely" flag. if previous_epoch_matched_any { - metrics::inc_counter_vec( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_ATTESTER_HIT, - &[id], - ); + self.aggregatable_metric(id, |label| { + metrics::inc_counter_vec( + &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_ATTESTER_HIT, + &[label], + ) + }); attestation_success.push(id); - debug!( - self.log, - "Previous epoch attestation success"; - "matched_source" => previous_epoch_matched_source, - "matched_target" => previous_epoch_matched_target, - "matched_head" => previous_epoch_matched_head, - "epoch" => prev_epoch, - "validator" => id, - ) + if self.individual_tracking() { + debug!( + self.log, + "Previous epoch attestation success"; + "matched_source" => previous_epoch_matched_source, + "matched_target" => previous_epoch_matched_target, + "matched_head" => previous_epoch_matched_head, + "epoch" => prev_epoch, + "validator" => id, + ) + } } else { - metrics::inc_counter_vec( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_ATTESTER_MISS, - &[id], - ); + self.aggregatable_metric(id, |label| { + metrics::inc_counter_vec( + &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_ATTESTER_MISS, + &[label], + ); + }); attestation_miss.push(id); - debug!( - self.log, - "Previous epoch attestation missing"; - "epoch" => prev_epoch, - "validator" => id, - ) + if self.individual_tracking() { + debug!( + self.log, + "Previous epoch attestation missing"; + "epoch" => prev_epoch, + "validator" => id, + ) + } } // Indicates if any on-chain attestation hit the head. 
if previous_epoch_matched_head { - metrics::inc_counter_vec( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_HEAD_ATTESTER_HIT, - &[id], - ); + self.aggregatable_metric(id, |label| { + metrics::inc_counter_vec( + &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_HEAD_ATTESTER_HIT, + &[label], + ); + }); } else { - metrics::inc_counter_vec( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_HEAD_ATTESTER_MISS, - &[id], - ); + self.aggregatable_metric(id, |label| { + metrics::inc_counter_vec( + &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_HEAD_ATTESTER_MISS, + &[label], + ); + }); head_miss.push(id); - debug!( - self.log, - "Attestation failed to match head"; - "epoch" => prev_epoch, - "validator" => id, - ); + if self.individual_tracking() { + debug!( + self.log, + "Attestation failed to match head"; + "epoch" => prev_epoch, + "validator" => id, + ); + } } // Indicates if any on-chain attestation hit the target. if previous_epoch_matched_target { - metrics::inc_counter_vec( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_TARGET_ATTESTER_HIT, - &[id], - ); + self.aggregatable_metric(id, |label| { + metrics::inc_counter_vec( + &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_TARGET_ATTESTER_HIT, + &[label], + ); + }); } else { - metrics::inc_counter_vec( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_TARGET_ATTESTER_MISS, - &[id], - ); + self.aggregatable_metric(id, |label| { + metrics::inc_counter_vec( + &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_TARGET_ATTESTER_MISS, + &[label], + ); + }); target_miss.push(id); - debug!( - self.log, - "Attestation failed to match target"; - "epoch" => prev_epoch, - "validator" => id, - ); + if self.individual_tracking() { + debug!( + self.log, + "Attestation failed to match target"; + "epoch" => prev_epoch, + "validator" => id, + ); + } } // Get the minimum value among the validator monitor observed inclusion distance @@ -511,21 +584,25 @@ impl ValidatorMonitor { if let Some(inclusion_delay) = min_inclusion_distance { if 
inclusion_delay > spec.min_attestation_inclusion_delay { suboptimal_inclusion.push(id); - debug!( - self.log, - "Potential sub-optimal inclusion delay"; - "optimal" => spec.min_attestation_inclusion_delay, - "delay" => inclusion_delay, - "epoch" => prev_epoch, - "validator" => id, - ); + if self.individual_tracking() { + debug!( + self.log, + "Potential sub-optimal inclusion delay"; + "optimal" => spec.min_attestation_inclusion_delay, + "delay" => inclusion_delay, + "epoch" => prev_epoch, + "validator" => id, + ); + } } - metrics::set_int_gauge( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_INCLUSION_DISTANCE, - &[id], - inclusion_delay as i64, - ); + if self.individual_tracking() { + metrics::set_int_gauge( + &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_INCLUSION_DISTANCE, + &[id], + inclusion_delay as i64, + ); + } } // Indicates the number of sync committee signatures that made it into @@ -536,13 +613,19 @@ impl ValidatorMonitor { let current_epoch = epoch - 1; if let Some(sync_committee) = summary.sync_committee() { if sync_committee.contains(pubkey) { - metrics::set_int_gauge( - &metrics::VALIDATOR_MONITOR_VALIDATOR_IN_CURRENT_SYNC_COMMITTEE, - &[id], - 1, - ); + if self.individual_tracking() { + metrics::set_int_gauge( + &metrics::VALIDATOR_MONITOR_VALIDATOR_IN_CURRENT_SYNC_COMMITTEE, + &[id], + 1, + ); + } let epoch_summary = monitored_validator.summaries.read(); if let Some(summary) = epoch_summary.get(¤t_epoch) { + // This log is not gated by + // `self.individual_tracking()` since the number of + // logs that can be generated is capped by the size + // of the sync committee. 
info!( self.log, "Current epoch sync signatures"; @@ -552,7 +635,7 @@ impl ValidatorMonitor { "validator" => id, ); } - } else { + } else if self.individual_tracking() { metrics::set_int_gauge( &metrics::VALIDATOR_MONITOR_VALIDATOR_IN_CURRENT_SYNC_COMMITTEE, &[id], @@ -693,12 +776,17 @@ impl ValidatorMonitor { let id = &validator.id; let delay = get_block_delay_ms(seen_timestamp, block, slot_clock); - metrics::inc_counter_vec(&metrics::VALIDATOR_MONITOR_BEACON_BLOCK_TOTAL, &[src, id]); - metrics::observe_timer_vec( - &metrics::VALIDATOR_MONITOR_BEACON_BLOCK_DELAY_SECONDS, - &[src, id], - delay, - ); + self.aggregatable_metric(id, |label| { + metrics::inc_counter_vec( + &metrics::VALIDATOR_MONITOR_BEACON_BLOCK_TOTAL, + &[src, label], + ); + metrics::observe_timer_vec( + &metrics::VALIDATOR_MONITOR_BEACON_BLOCK_DELAY_SECONDS, + &[src, label], + delay, + ); + }); info!( self.log, @@ -764,27 +852,31 @@ impl ValidatorMonitor { if let Some(validator) = self.get_validator(*i) { let id = &validator.id; - metrics::inc_counter_vec( - &metrics::VALIDATOR_MONITOR_UNAGGREGATED_ATTESTATION_TOTAL, - &[src, id], - ); - metrics::observe_timer_vec( - &metrics::VALIDATOR_MONITOR_UNAGGREGATED_ATTESTATION_DELAY_SECONDS, - &[src, id], - delay, - ); + self.aggregatable_metric(id, |label| { + metrics::inc_counter_vec( + &metrics::VALIDATOR_MONITOR_UNAGGREGATED_ATTESTATION_TOTAL, + &[src, label], + ); + metrics::observe_timer_vec( + &metrics::VALIDATOR_MONITOR_UNAGGREGATED_ATTESTATION_DELAY_SECONDS, + &[src, label], + delay, + ); + }); - info!( - self.log, - "Unaggregated attestation"; - "head" => ?data.beacon_block_root, - "index" => %data.index, - "delay_ms" => %delay.as_millis(), - "epoch" => %epoch, - "slot" => %data.slot, - "src" => src, - "validator" => %id, - ); + if self.individual_tracking() { + info!( + self.log, + "Unaggregated attestation"; + "head" => ?data.beacon_block_root, + "index" => %data.index, + "delay_ms" => %delay.as_millis(), + "epoch" => %epoch, + "slot" => 
%data.slot, + "src" => src, + "validator" => %id, + ); + } validator.with_epoch_summary(epoch, |summary| { summary.register_unaggregated_attestation(delay) @@ -848,50 +940,22 @@ impl ValidatorMonitor { if let Some(validator) = self.get_validator(aggregator_index) { let id = &validator.id; - metrics::inc_counter_vec( - &metrics::VALIDATOR_MONITOR_AGGREGATED_ATTESTATION_TOTAL, - &[src, id], - ); - metrics::observe_timer_vec( - &metrics::VALIDATOR_MONITOR_AGGREGATED_ATTESTATION_DELAY_SECONDS, - &[src, id], - delay, - ); - - info!( - self.log, - "Aggregated attestation"; - "head" => ?data.beacon_block_root, - "index" => %data.index, - "delay_ms" => %delay.as_millis(), - "epoch" => %epoch, - "slot" => %data.slot, - "src" => src, - "validator" => %id, - ); - - validator.with_epoch_summary(epoch, |summary| { - summary.register_aggregated_attestation(delay) - }); - } - - indexed_attestation.attesting_indices.iter().for_each(|i| { - if let Some(validator) = self.get_validator(*i) { - let id = &validator.id; - + self.aggregatable_metric(id, |label| { metrics::inc_counter_vec( - &metrics::VALIDATOR_MONITOR_ATTESTATION_IN_AGGREGATE_TOTAL, - &[src, id], + &metrics::VALIDATOR_MONITOR_AGGREGATED_ATTESTATION_TOTAL, + &[src, label], ); metrics::observe_timer_vec( - &metrics::VALIDATOR_MONITOR_ATTESTATION_IN_AGGREGATE_DELAY_SECONDS, - &[src, id], + &metrics::VALIDATOR_MONITOR_AGGREGATED_ATTESTATION_DELAY_SECONDS, + &[src, label], delay, ); + }); + if self.individual_tracking() { info!( self.log, - "Attestation included in aggregate"; + "Aggregated attestation"; "head" => ?data.beacon_block_root, "index" => %data.index, "delay_ms" => %delay.as_millis(), @@ -900,6 +964,42 @@ impl ValidatorMonitor { "src" => src, "validator" => %id, ); + } + + validator.with_epoch_summary(epoch, |summary| { + summary.register_aggregated_attestation(delay) + }); + } + + indexed_attestation.attesting_indices.iter().for_each(|i| { + if let Some(validator) = self.get_validator(*i) { + let id = 
&validator.id; + + self.aggregatable_metric(id, |label| { + metrics::inc_counter_vec( + &metrics::VALIDATOR_MONITOR_ATTESTATION_IN_AGGREGATE_TOTAL, + &[src, label], + ); + metrics::observe_timer_vec( + &metrics::VALIDATOR_MONITOR_ATTESTATION_IN_AGGREGATE_DELAY_SECONDS, + &[src, label], + delay, + ); + }); + + if self.individual_tracking() { + info!( + self.log, + "Attestation included in aggregate"; + "head" => ?data.beacon_block_root, + "index" => %data.index, + "delay_ms" => %delay.as_millis(), + "epoch" => %epoch, + "slot" => %data.slot, + "src" => src, + "validator" => %id, + ); + } validator.with_epoch_summary(epoch, |summary| { summary.register_aggregate_attestation_inclusion() @@ -933,26 +1033,31 @@ impl ValidatorMonitor { if let Some(validator) = self.get_validator(*i) { let id = &validator.id; - metrics::inc_counter_vec( - &metrics::VALIDATOR_MONITOR_ATTESTATION_IN_BLOCK_TOTAL, - &["block", id], - ); - metrics::set_int_gauge( - &metrics::VALIDATOR_MONITOR_ATTESTATION_IN_BLOCK_DELAY_SLOTS, - &["block", id], - delay.as_u64() as i64, - ); + self.aggregatable_metric(id, |label| { + metrics::inc_counter_vec( + &metrics::VALIDATOR_MONITOR_ATTESTATION_IN_BLOCK_TOTAL, + &["block", label], + ); + }); - info!( - self.log, - "Attestation included in block"; - "head" => ?data.beacon_block_root, - "index" => %data.index, - "inclusion_lag" => format!("{} slot(s)", delay), - "epoch" => %epoch, - "slot" => %data.slot, - "validator" => %id, - ); + if self.individual_tracking() { + metrics::set_int_gauge( + &metrics::VALIDATOR_MONITOR_ATTESTATION_IN_BLOCK_DELAY_SLOTS, + &["block", id], + delay.as_u64() as i64, + ); + + info!( + self.log, + "Attestation included in block"; + "head" => ?data.beacon_block_root, + "index" => %data.index, + "inclusion_lag" => format!("{} slot(s)", delay), + "epoch" => %epoch, + "slot" => %data.slot, + "validator" => %id, + ); + } validator.with_epoch_summary(epoch, |summary| { summary.register_attestation_block_inclusion(inclusion_distance) @@ 
-1010,26 +1115,30 @@ impl ValidatorMonitor { slot_clock, ); - metrics::inc_counter_vec( - &metrics::VALIDATOR_MONITOR_SYNC_COMMITTEE_MESSAGES_TOTAL, - &[src, id], - ); - metrics::observe_timer_vec( - &metrics::VALIDATOR_MONITOR_SYNC_COMMITTEE_MESSAGES_DELAY_SECONDS, - &[src, id], - delay, - ); + self.aggregatable_metric(id, |label| { + metrics::inc_counter_vec( + &metrics::VALIDATOR_MONITOR_SYNC_COMMITTEE_MESSAGES_TOTAL, + &[src, label], + ); + metrics::observe_timer_vec( + &metrics::VALIDATOR_MONITOR_SYNC_COMMITTEE_MESSAGES_DELAY_SECONDS, + &[src, label], + delay, + ); + }); - info!( - self.log, - "Sync committee message"; - "head" => %sync_committee_message.beacon_block_root, - "delay_ms" => %delay.as_millis(), - "epoch" => %epoch, - "slot" => %sync_committee_message.slot, - "src" => src, - "validator" => %id, - ); + if self.individual_tracking() { + info!( + self.log, + "Sync committee message"; + "head" => %sync_committee_message.beacon_block_root, + "delay_ms" => %delay.as_millis(), + "epoch" => %epoch, + "slot" => %sync_committee_message.slot, + "src" => src, + "validator" => %id, + ); + } validator.with_epoch_summary(epoch, |summary| { summary.register_sync_committee_message(delay) @@ -1094,44 +1203,22 @@ impl ValidatorMonitor { if let Some(validator) = self.get_validator(aggregator_index) { let id = &validator.id; - metrics::inc_counter_vec( - &metrics::VALIDATOR_MONITOR_SYNC_CONTRIBUTIONS_TOTAL, - &[src, id], - ); - metrics::observe_timer_vec( - &metrics::VALIDATOR_MONITOR_SYNC_CONTRIBUTIONS_DELAY_SECONDS, - &[src, id], - delay, - ); - - info!( - self.log, - "Sync contribution"; - "head" => %beacon_block_root, - "delay_ms" => %delay.as_millis(), - "epoch" => %epoch, - "slot" => %slot, - "src" => src, - "validator" => %id, - ); - - validator.with_epoch_summary(epoch, |summary| { - summary.register_sync_committee_contribution(delay) - }); - } - - for validator_pubkey in participant_pubkeys.iter() { - if let Some(validator) = 
self.validators.get(validator_pubkey) { - let id = &validator.id; - + self.aggregatable_metric(id, |label| { metrics::inc_counter_vec( - &metrics::VALIDATOR_MONITOR_SYNC_COMMITTEE_MESSAGE_IN_CONTRIBUTION_TOTAL, - &[src, id], + &metrics::VALIDATOR_MONITOR_SYNC_CONTRIBUTIONS_TOTAL, + &[src, label], ); + metrics::observe_timer_vec( + &metrics::VALIDATOR_MONITOR_SYNC_CONTRIBUTIONS_DELAY_SECONDS, + &[src, label], + delay, + ); + }); + if self.individual_tracking() { info!( self.log, - "Sync signature included in contribution"; + "Sync contribution"; "head" => %beacon_block_root, "delay_ms" => %delay.as_millis(), "epoch" => %epoch, @@ -1139,6 +1226,36 @@ impl ValidatorMonitor { "src" => src, "validator" => %id, ); + } + + validator.with_epoch_summary(epoch, |summary| { + summary.register_sync_committee_contribution(delay) + }); + } + + for validator_pubkey in participant_pubkeys.iter() { + if let Some(validator) = self.validators.get(validator_pubkey) { + let id = &validator.id; + + self.aggregatable_metric(id, |label| { + metrics::inc_counter_vec( + &metrics::VALIDATOR_MONITOR_SYNC_COMMITTEE_MESSAGE_IN_CONTRIBUTION_TOTAL, + &[src, label], + ); + }); + + if self.individual_tracking() { + info!( + self.log, + "Sync signature included in contribution"; + "head" => %beacon_block_root, + "delay_ms" => %delay.as_millis(), + "epoch" => %epoch, + "slot" => %slot, + "src" => src, + "validator" => %id, + ); + } validator.with_epoch_summary(epoch, |summary| { summary.register_sync_signature_contribution_inclusion() @@ -1160,19 +1277,23 @@ impl ValidatorMonitor { if let Some(validator) = self.validators.get(validator_pubkey) { let id = &validator.id; - metrics::inc_counter_vec( - &metrics::VALIDATOR_MONITOR_SYNC_COMMITTEE_MESSAGE_IN_BLOCK_TOTAL, - &["block", id], - ); + self.aggregatable_metric(id, |label| { + metrics::inc_counter_vec( + &metrics::VALIDATOR_MONITOR_SYNC_COMMITTEE_MESSAGE_IN_BLOCK_TOTAL, + &["block", label], + ); + }); - info!( - self.log, - "Sync signature included 
in block"; - "head" => %beacon_block_root, - "epoch" => %epoch, - "slot" => %slot, - "validator" => %id, - ); + if self.individual_tracking() { + info!( + self.log, + "Sync signature included in block"; + "head" => %beacon_block_root, + "epoch" => %epoch, + "slot" => %slot, + "validator" => %id, + ); + } validator.with_epoch_summary(epoch, |summary| { summary.register_sync_signature_block_inclusions(); @@ -1201,8 +1322,12 @@ impl ValidatorMonitor { let id = &validator.id; let epoch = exit.epoch; - metrics::inc_counter_vec(&metrics::VALIDATOR_MONITOR_EXIT_TOTAL, &[src, id]); + self.aggregatable_metric(id, |label| { + metrics::inc_counter_vec(&metrics::VALIDATOR_MONITOR_EXIT_TOTAL, &[src, label]); + }); + // Not gated behind `self.individual_tracking()` since it's an + // infrequent and interesting message. info!( self.log, "Voluntary exit"; @@ -1240,11 +1365,15 @@ impl ValidatorMonitor { if let Some(validator) = self.get_validator(proposer) { let id = &validator.id; - metrics::inc_counter_vec( - &metrics::VALIDATOR_MONITOR_PROPOSER_SLASHING_TOTAL, - &[src, id], - ); + self.aggregatable_metric(id, |label| { + metrics::inc_counter_vec( + &metrics::VALIDATOR_MONITOR_PROPOSER_SLASHING_TOTAL, + &[src, label], + ); + }); + // Not gated behind `self.individual_tracking()` since it's an + // infrequent and interesting message. crit!( self.log, "Proposer slashing"; @@ -1293,11 +1422,15 @@ impl ValidatorMonitor { let id = &validator.id; let epoch = data.slot.epoch(T::slots_per_epoch()); - metrics::inc_counter_vec( - &metrics::VALIDATOR_MONITOR_ATTESTER_SLASHING_TOTAL, - &[src, id], - ); + self.aggregatable_metric(id, |label| { + metrics::inc_counter_vec( + &metrics::VALIDATOR_MONITOR_ATTESTER_SLASHING_TOTAL, + &[src, label], + ); + }); + // Not gated behind `self.individual_tracking()` since it's an + // infrequent and interesting message. 
crit!( self.log, "Attester slashing"; @@ -1347,69 +1480,80 @@ impl ValidatorMonitor { /* * Attestations */ - metrics::set_gauge_vec( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ATTESTATIONS_TOTAL, - &[id], - summary.attestations as i64, - ); if let Some(delay) = summary.attestation_min_delay { - metrics::observe_timer_vec( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ATTESTATIONS_MIN_DELAY_SECONDS, + self.aggregatable_metric(id, |tag| { + metrics::observe_timer_vec( + &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ATTESTATIONS_MIN_DELAY_SECONDS, + &[tag], + delay, + ); + }); + } + if self.individual_tracking() { + metrics::set_gauge_vec( + &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ATTESTATIONS_TOTAL, &[id], - delay, + summary.attestations as i64, + ); + metrics::set_gauge_vec( + &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ATTESTATION_AGGREGATE_INCLUSIONS, + &[id], + summary.attestation_aggregate_inclusions as i64, ); - } - metrics::set_gauge_vec( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ATTESTATION_AGGREGATE_INCLUSIONS, - &[id], - summary.attestation_aggregate_inclusions as i64, - ); - metrics::set_gauge_vec( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ATTESTATION_BLOCK_INCLUSIONS, - &[id], - summary.attestation_block_inclusions as i64, - ); - if let Some(distance) = summary.attestation_min_block_inclusion_distance { metrics::set_gauge_vec( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ATTESTATION_BLOCK_MIN_INCLUSION_DISTANCE, + &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ATTESTATION_BLOCK_INCLUSIONS, &[id], - distance.as_u64() as i64, + summary.attestation_block_inclusions as i64, ); + + if let Some(distance) = summary.attestation_min_block_inclusion_distance { + metrics::set_gauge_vec( + &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ATTESTATION_BLOCK_MIN_INCLUSION_DISTANCE, + &[id], + distance.as_u64() as i64, + ); + } } /* * Sync committee messages */ - metrics::set_gauge_vec( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_SYNC_COMMITTEE_MESSAGES_TOTAL, - &[id], - summary.sync_committee_messages as i64, - ); 
if let Some(delay) = summary.sync_committee_message_min_delay { - metrics::observe_timer_vec( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_SYNC_COMMITTEE_MESSAGES_MIN_DELAY_SECONDS, + self.aggregatable_metric(id, |tag| { + metrics::observe_timer_vec( + &metrics::VALIDATOR_MONITOR_PREV_EPOCH_SYNC_COMMITTEE_MESSAGES_MIN_DELAY_SECONDS, + &[tag], + delay, + ); + }); + } + if self.individual_tracking() { + metrics::set_gauge_vec( + &metrics::VALIDATOR_MONITOR_PREV_EPOCH_SYNC_COMMITTEE_MESSAGES_TOTAL, &[id], - delay, + summary.sync_committee_messages as i64, + ); + metrics::set_gauge_vec( + &metrics::VALIDATOR_MONITOR_PREV_EPOCH_SYNC_CONTRIBUTION_INCLUSIONS, + &[id], + summary.sync_signature_contribution_inclusions as i64, + ); + metrics::set_gauge_vec( + &metrics::VALIDATOR_MONITOR_PREV_EPOCH_SYNC_SIGNATURE_BLOCK_INCLUSIONS, + &[id], + summary.sync_signature_block_inclusions as i64, ); } - metrics::set_gauge_vec( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_SYNC_CONTRIBUTION_INCLUSIONS, - &[id], - summary.sync_signature_contribution_inclusions as i64, - ); - metrics::set_gauge_vec( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_SYNC_SIGNATURE_BLOCK_INCLUSIONS, - &[id], - summary.sync_signature_block_inclusions as i64, - ); /* * Sync contributions */ - metrics::set_gauge_vec( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_SYNC_CONTRIBUTIONS_TOTAL, - &[id], - summary.sync_contributions as i64, - ); + if self.individual_tracking() { + metrics::set_gauge_vec( + &metrics::VALIDATOR_MONITOR_PREV_EPOCH_SYNC_CONTRIBUTIONS_TOTAL, + &[id], + summary.sync_contributions as i64, + ); + } if let Some(delay) = summary.sync_contribution_min_delay { metrics::observe_timer_vec( &metrics::VALIDATOR_MONITOR_PREV_EPOCH_SYNC_CONTRIBUTION_MIN_DELAY_SECONDS, @@ -1421,51 +1565,61 @@ impl ValidatorMonitor { /* * Blocks */ - metrics::set_gauge_vec( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_BEACON_BLOCKS_TOTAL, - &[id], - summary.blocks as i64, - ); - if let Some(delay) = summary.block_min_delay { - 
metrics::observe_timer_vec( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_BEACON_BLOCKS_MIN_DELAY_SECONDS, + if self.individual_tracking() { + metrics::set_gauge_vec( + &metrics::VALIDATOR_MONITOR_PREV_EPOCH_BEACON_BLOCKS_TOTAL, &[id], - delay, + summary.blocks as i64, ); } + if let Some(delay) = summary.block_min_delay { + self.aggregatable_metric(id, |tag| { + metrics::observe_timer_vec( + &metrics::VALIDATOR_MONITOR_PREV_EPOCH_BEACON_BLOCKS_MIN_DELAY_SECONDS, + &[tag], + delay, + ); + }); + } /* * Aggregates */ - metrics::set_gauge_vec( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_AGGREGATES_TOTAL, - &[id], - summary.aggregates as i64, - ); - if let Some(delay) = summary.aggregate_min_delay { - metrics::observe_timer_vec( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_AGGREGATES_MIN_DELAY_SECONDS, + if self.individual_tracking() { + metrics::set_gauge_vec( + &metrics::VALIDATOR_MONITOR_PREV_EPOCH_AGGREGATES_TOTAL, &[id], - delay, + summary.aggregates as i64, ); } + if let Some(delay) = summary.aggregate_min_delay { + self.aggregatable_metric(id, |tag| { + metrics::observe_timer_vec( + &metrics::VALIDATOR_MONITOR_PREV_EPOCH_AGGREGATES_MIN_DELAY_SECONDS, + &[tag], + delay, + ); + }); + } /* * Other */ - metrics::set_gauge_vec( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_EXITS_TOTAL, - &[id], - summary.exits as i64, - ); - metrics::set_gauge_vec( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_PROPOSER_SLASHINGS_TOTAL, - &[id], - summary.proposer_slashings as i64, - ); - metrics::set_gauge_vec( - &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ATTESTER_SLASHINGS_TOTAL, - &[id], - summary.attester_slashings as i64, - ); + if self.individual_tracking() { + metrics::set_gauge_vec( + &metrics::VALIDATOR_MONITOR_PREV_EPOCH_EXITS_TOTAL, + &[id], + summary.exits as i64, + ); + metrics::set_gauge_vec( + &metrics::VALIDATOR_MONITOR_PREV_EPOCH_PROPOSER_SLASHINGS_TOTAL, + &[id], + summary.proposer_slashings as i64, + ); + metrics::set_gauge_vec( + 
&metrics::VALIDATOR_MONITOR_PREV_EPOCH_ATTESTER_SLASHINGS_TOTAL, + &[id], + summary.attester_slashings as i64, + ); + } } } } diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index b2fc7a6402e..8a6ea9cfe1a 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -5,6 +5,7 @@ use beacon_chain::builder::BeaconChainBuilder; use beacon_chain::test_utils::{ test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType, }; +use beacon_chain::validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD; use beacon_chain::{ historical_blocks::HistoricalBlockError, migrate::MigratorConfig, BeaconChain, BeaconChainError, BeaconChainTypes, BeaconSnapshot, ChainConfig, NotifyExecutionLayer, @@ -2121,7 +2122,7 @@ async fn weak_subjectivity_sync() { log.clone(), 1, ))) - .monitor_validators(true, vec![], log) + .monitor_validators(true, vec![], DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, log) .build() .expect("should build"), ); diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index f3e937b2e5f..3b016ebda9c 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -173,6 +173,7 @@ where .monitor_validators( config.validator_monitor_auto, config.validator_monitor_pubkeys.clone(), + config.validator_monitor_individual_tracking_threshold, runtime_context .service_context("val_mon".to_string()) .log() diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 0a2997762a9..22b868256ad 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -1,3 +1,4 @@ +use beacon_chain::validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD; use directory::DEFAULT_ROOT_DIR; use environment::LoggerConfig; use network::NetworkConfig; @@ -59,6 +60,11 @@ pub struct Config { pub validator_monitor_auto: bool, /// A list of validator pubkeys to monitor. 
pub validator_monitor_pubkeys: Vec, + /// Once the number of monitored validators goes above this threshold, we + /// will stop tracking metrics on a per-validator basis. This prevents large + /// validator counts causing infeasibly high cardinailty for Prometheus and + /// high log volumes. + pub validator_monitor_individual_tracking_threshold: usize, #[serde(skip)] /// The `genesis` field is not serialized or deserialized by `serde` to ensure it is defined /// via the CLI at runtime, instead of from a configuration file saved to disk. @@ -97,6 +103,7 @@ impl Default for Config { slasher: None, validator_monitor_auto: false, validator_monitor_pubkeys: vec![], + validator_monitor_individual_tracking_threshold: DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, logger_config: LoggerConfig::default(), } } diff --git a/beacon_node/network/src/subnet_service/tests/mod.rs b/beacon_node/network/src/subnet_service/tests/mod.rs index 30f030eba7a..9e1c9f51bcc 100644 --- a/beacon_node/network/src/subnet_service/tests/mod.rs +++ b/beacon_node/network/src/subnet_service/tests/mod.rs @@ -2,6 +2,7 @@ use super::*; use beacon_chain::{ builder::{BeaconChainBuilder, Witness}, eth1_chain::CachingEth1Backend, + validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, BeaconChain, }; use futures::prelude::*; @@ -75,7 +76,7 @@ impl TestBeaconChain { Duration::from_millis(SLOT_DURATION_MILLIS), )) .shutdown_sender(shutdown_tx) - .monitor_validators(true, vec![], log) + .monitor_validators(true, vec![], DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, log) .build() .expect("should build"), ); diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 915872e2b5e..38d81512e4b 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -753,6 +753,17 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .value_name("PATH") .takes_value(true) ) + .arg( + Arg::with_name("validator-monitor-individual-tracking-threshold") + .long("validator-monitor-individual-tracking-threshold") + .help("Once the validator 
monitor reaches this number of local validators \ + it will stop collecting per-validator Prometheus metrics and issuing \ + per-validator logs. Instead, it will provide aggregate metrics and logs. \ + This avoids infeasibly high cardinality in the Prometheus database and \ + high log volume when using many validators. Defaults to 64.") + .value_name("INTEGER") + .takes_value(true) + ) .arg( Arg::with_name("disable-lock-timeouts") .long("disable-lock-timeouts") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 241a5952eec..294568cca9f 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -675,6 +675,12 @@ pub fn get_config( .extend_from_slice(&pubkeys); } + if let Some(count) = + clap_utils::parse_optional(cli_args, "validator-monitor-individual-tracking-threshold")? + { + client_config.validator_monitor_individual_tracking_threshold = count; + } + if cli_args.is_present("disable-lock-timeouts") { client_config.chain.enable_lock_timeouts = false; } diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index bd96885c68e..4a2e160e8bc 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -1237,6 +1237,31 @@ fn validator_monitor_file_flag() { assert_eq!(config.validator_monitor_pubkeys[1].to_string(), "0xbeefdeadbeefdeaddeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"); }); } +#[test] +fn validator_monitor_metrics_threshold_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.validator_monitor_individual_tracking_threshold, + // If this value changes make sure to update the help text for + // the CLI command. 
+ 64 + ) + }); +} +#[test] +fn validator_monitor_metrics_threshold_custom() { + CommandLineTest::new() + .flag( + "validator-monitor-individual-tracking-threshold", + Some("42"), + ) + .run_with_zero_port() + .with_config(|config| { + assert_eq!(config.validator_monitor_individual_tracking_threshold, 42) + }); +} // Tests for Store flags. #[test] From c85cd87c10dd6c0532e5958dde1d05e31c04d847 Mon Sep 17 00:00:00 2001 From: realbigsean Date: Mon, 9 Jan 2023 08:18:56 +0000 Subject: [PATCH 18/23] Web3 signer validator definitions reloading on any request (#3801) ## Issue Addressed https://github.com/sigp/lighthouse/issues/3795 Co-authored-by: realbigsean --- validator_client/src/initialized_validators.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/validator_client/src/initialized_validators.rs b/validator_client/src/initialized_validators.rs index d3f6b1eb69a..7fe2f5f8ecd 100644 --- a/validator_client/src/initialized_validators.rs +++ b/validator_client/src/initialized_validators.rs @@ -994,17 +994,17 @@ impl InitializedValidators { let mut disabled_uuids = HashSet::new(); for def in self.definitions.as_slice() { if def.enabled { + let pubkey_bytes = def.voting_public_key.compress(); + + if self.validators.contains_key(&pubkey_bytes) { + continue; + } + match &def.signing_definition { SigningDefinition::LocalKeystore { voting_keystore_path, .. } => { - let pubkey_bytes = def.voting_public_key.compress(); - - if self.validators.contains_key(&pubkey_bytes) { - continue; - } - if let Some(key_store) = key_stores.get(voting_keystore_path) { disabled_uuids.remove(key_store.uuid()); } From 0c74cd4696d8fdc7ec43d1ee5d8cafb33781b8f3 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 9 Jan 2023 23:29:23 +0000 Subject: [PATCH 19/23] Update dependencies incl Tokio (#3866) ## Proposed Changes Update all dependencies to new semver-compatible releases with `cargo update`. 
Importantly this patches a Tokio vuln: https://rustsec.org/advisories/RUSTSEC-2023-0001. I don't think we were affected by the vuln because it only applies to named pipes on Windows, but it's still good hygiene to patch. --- Cargo.lock | 315 ++++++++++++++++++++++++----------------------------- 1 file changed, 140 insertions(+), 175 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 200c1fc9bd9..d1329d74602 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -205,24 +205,24 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.66" +version = "1.0.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "216261ddc8289130e551ddcd5ce8a064710c0d064a4d2895c67151c92b5443f6" +checksum = "2cb2f989d18dd141ab8ae82f64d1a8cdd37e0840f73a406896cf5e99502fab61" [[package]] name = "arbitrary" -version = "1.2.0" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29d47fbf90d5149a107494b15a7dc8d69b351be2db3bb9691740e88ec17fd880" +checksum = "b0224938f92e7aef515fac2ff2d18bd1115c1394ddf4a092e0c87e8be9499ee5" dependencies = [ "derive_arbitrary", ] [[package]] name = "arc-swap" -version = "1.5.1" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "983cd8b9d4b02a6dc6ffa557262eb5858a27a0038ffffe21a0f133eaa819a164" +checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" [[package]] name = "arrayref" @@ -245,7 +245,7 @@ dependencies = [ "asn1-rs-derive 0.1.0", "asn1-rs-impl", "displaydoc", - "nom 7.1.1", + "nom 7.1.2", "num-traits", "rusticata-macros", "thiserror", @@ -261,7 +261,7 @@ dependencies = [ "asn1-rs-derive 0.4.0", "asn1-rs-impl", "displaydoc", - "nom 7.1.1", + "nom 7.1.2", "num-traits", "rusticata-macros", "thiserror", @@ -326,7 +326,7 @@ dependencies = [ "slab", "socket2", "waker-fn", - "windows-sys 0.42.0", + "windows-sys", ] [[package]] @@ -362,9 +362,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.59" +version = 
"0.1.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e6e93155431f3931513b243d371981bb2770112b370c82745a1d19d2f99364" +checksum = "705339e0e4a9690e2908d2b3d049d85682cf19fbd5782494498fbf7003a6a282" dependencies = [ "proc-macro2", "quote", @@ -418,7 +418,7 @@ version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" dependencies = [ - "hermit-abi", + "hermit-abi 0.1.19", "libc", "winapi", ] @@ -464,7 +464,7 @@ dependencies = [ "http", "http-body", "hyper", - "itoa 1.0.4", + "itoa 1.0.5", "matchit", "memchr", "mime", @@ -704,9 +704,9 @@ dependencies = [ [[package]] name = "blake2" -version = "0.10.5" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b12e5fd123190ce1c2e559308a94c9bacad77907d4c6005d9e58fe1a0689e55e" +checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" dependencies = [ "digest 0.10.6", ] @@ -872,9 +872,9 @@ dependencies = [ [[package]] name = "bzip2" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6afcd980b5f3a45017c57e57a2fcccbb351cc43a356ce117ef760ef8052b89b0" +checksum = "bdb116a6ef3f6c3698828873ad02c3014b3c85cadb88496095628e3ef1e347f8" dependencies = [ "bzip2-sys", "libc", @@ -935,7 +935,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" dependencies = [ - "nom 7.1.1", + "nom 7.1.2", ] [[package]] @@ -1395,7 +1395,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1631ca6e3c59112501a9d87fd86f21591ff77acd31331e8a73f8d80a65bbdd71" dependencies = [ "nix 0.26.1", - "windows-sys 0.42.0", + "windows-sys", ] [[package]] @@ -1427,9 +1427,9 @@ dependencies = [ [[package]] name = "cxx" -version = "1.0.83" +version = "1.0.86" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdf07d07d6531bfcdbe9b8b739b104610c6508dcc4d63b410585faf338241daf" +checksum = "51d1075c37807dcf850c379432f0df05ba52cc30f279c5cfc43cc221ce7f8579" dependencies = [ "cc", "cxxbridge-flags", @@ -1439,9 +1439,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.83" +version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2eb5b96ecdc99f72657332953d4d9c50135af1bac34277801cc3937906ebd39" +checksum = "5044281f61b27bc598f2f6647d480aed48d2bf52d6eb0b627d84c0361b17aa70" dependencies = [ "cc", "codespan-reporting", @@ -1454,15 +1454,15 @@ dependencies = [ [[package]] name = "cxxbridge-flags" -version = "1.0.83" +version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac040a39517fd1674e0f32177648334b0f4074625b5588a64519804ba0553b12" +checksum = "61b50bc93ba22c27b0d31128d2d130a0a6b3d267ae27ef7e4fae2167dfe8781c" [[package]] name = "cxxbridge-macro" -version = "1.0.83" +version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1362b0ddcfc4eb0a1f57b68bd77dd99f0e826958a96abd0ae9bd092e114ffed6" +checksum = "39e61fda7e62115119469c7b3591fd913ecca96fb766cfd3f2e2502ab7bc87a5" dependencies = [ "proc-macro2", "quote", @@ -1652,7 +1652,7 @@ checksum = "fe398ac75057914d7d07307bf67dc7f3f574a26783b4fc7805a20ffa9f506e82" dependencies = [ "asn1-rs 0.3.1", "displaydoc", - "nom 7.1.1", + "nom 7.1.2", "num-bigint", "num-traits", "rusticata-macros", @@ -1666,7 +1666,7 @@ checksum = "42d4bc9b0db0a0df9ae64634ac5bdefb7afcb534e182275ca0beadbe486701c1" dependencies = [ "asn1-rs 0.5.1", "displaydoc", - "nom 7.1.1", + "nom 7.1.2", "num-bigint", "num-traits", "rusticata-macros", @@ -1685,9 +1685,9 @@ dependencies = [ [[package]] name = "derive_arbitrary" -version = "1.2.1" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f8a16495aeb28047bb1185fca837baf755e7d71ed3aeed7f8504654ffa927208" +checksum = "cf460bbff5f571bfc762da5102729f59f338be7db17a21fade44c5c4f5005350" dependencies = [ "proc-macro2", "quote", @@ -1855,9 +1855,9 @@ dependencies = [ [[package]] name = "dtoa" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8a6eee2d5d0d113f015688310da018bd1d864d86bd567c8fca9c266889e1bfa" +checksum = "c00704156a7de8df8da0911424e30c2049957b0a714542a44e05fe693dd85313" [[package]] name = "ecdsa" @@ -2945,9 +2945,9 @@ dependencies = [ [[package]] name = "glob" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" +checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "group" @@ -3076,6 +3076,15 @@ dependencies = [ "libc", ] +[[package]] +name = "hermit-abi" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7" +dependencies = [ + "libc", +] + [[package]] name = "hex" version = "0.4.3" @@ -3166,7 +3175,7 @@ checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" dependencies = [ "bytes", "fnv", - "itoa 1.0.4", + "itoa 1.0.5", ] [[package]] @@ -3283,7 +3292,7 @@ dependencies = [ "http-body", "httparse", "httpdate", - "itoa 1.0.4", + "itoa 1.0.5", "pin-project-lite 0.2.9", "socket2", "tokio", @@ -3530,9 +3539,9 @@ dependencies = [ [[package]] name = "interceptor" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ffaa4d24f546a18eaeee91f7b2c52e080e20e285e43cd7c27a527b4712cfdad" +checksum = "1e8a11ae2da61704edada656798b61c94b35ecac2c58eb955156987d5e6be90b" dependencies = [ "async-trait", "bytes", @@ -3561,9 +3570,9 @@ dependencies = [ [[package]] name = "ipnet" -version 
= "2.7.0" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11b0d96e660696543b251e58030cf9787df56da39dab19ad60eae7353040917e" +checksum = "30e22bd8629359895450b59ea7a776c850561b96a3b1d31321c1949d9e6c9146" [[package]] name = "itertools" @@ -3582,9 +3591,9 @@ checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" [[package]] name = "itoa" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc" +checksum = "fad582f4b9e86b6caa621cabeb0963332d92eea04729ab12892c2533951e6440" [[package]] name = "js-sys" @@ -3732,9 +3741,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.138" +version = "0.2.139" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db6d7e329c562c5dfab7a46a2afabc8b987ab9a4834c9d1ca04dc54c1546cef8" +checksum = "201de327520df007757c1f0adce6e827fe8562fbc28bfd9c15571c66ca1f5f79" [[package]] name = "libflate" @@ -3942,9 +3951,9 @@ dependencies = [ [[package]] name = "libp2p-identify" -version = "0.41.0" +version = "0.41.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "647d6a99f8d5b7366ee6bcc608ec186e2fb58b497cf914c8409b803bd0f594a2" +checksum = "c052d0026f4817b44869bfb6810f4e1112f43aec8553f2cb38881c524b563abf" dependencies = [ "asynchronous-codec", "futures", @@ -4382,9 +4391,9 @@ dependencies = [ [[package]] name = "link-cplusplus" -version = "1.0.7" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9272ab7b96c9046fbc5bc56c06c117cb639fe2d509df0c421cad82d2915cf369" +checksum = "ecd207c9c713c34f95a097a5b029ac2ce6010530c7b49d7fea24d977dede04f5" dependencies = [ "cc", ] @@ -4687,7 +4696,7 @@ dependencies = [ "libc", "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.42.0", + "windows-sys", ] [[package]] @@ -5036,9 +5045,9 @@ checksum = 
"cf51a729ecf40266a2368ad335a5fdde43471f545a967109cd62146ecf8b66ff" [[package]] name = "nom" -version = "7.1.1" +version = "7.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8903e5a29a317527874d0402f867152a3d21c908bb0b933e416c65e301d4c36" +checksum = "e5507769c4919c998e69e49c839d9dc6e693ede4cc4290d6ad8b41d4f09c548c" dependencies = [ "memchr", "minimal-lexical", @@ -5125,11 +5134,11 @@ dependencies = [ [[package]] name = "num_cpus" -version = "1.14.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6058e64324c71e02bc2b150e4f3bc8286db6c83092132ffa3f6b1eab0f9def5" +checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b" dependencies = [ - "hermit-abi", + "hermit-abi 0.2.6", "libc", ] @@ -5144,9 +5153,9 @@ dependencies = [ [[package]] name = "object" -version = "0.30.0" +version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "239da7f290cfa979f43f85a8efeee9a8a76d0827c356d37f9d3d7254d6b537fb" +checksum = "8d864c91689fdc196779b98dba0aceac6118594c2df6ee5d943eb6a8df4d107a" dependencies = [ "memchr", ] @@ -5171,9 +5180,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.16.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86f0b0d4bf799edbc74508c1e8bf170ff5f41238e5f8225603ca7caaae2b7860" +checksum = "6f61fba1741ea2b3d6a1e3178721804bb716a68a6aeba1149b5d52e3d464ea66" [[package]] name = "oneshot_broadcast" @@ -5221,9 +5230,9 @@ dependencies = [ [[package]] name = "openssl" -version = "0.10.44" +version = "0.10.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29d971fd5722fec23977260f6e81aa67d2f22cadbdc2aa049f1022d9a3be1566" +checksum = "b102428fd03bc5edf97f62620f7298614c45cedf287c271e7ed450bbaf83f2e1" dependencies = [ "bitflags", "cfg-if", @@ -5262,9 +5271,9 @@ dependencies = [ [[package]] name = "openssl-sys" -version = "0.9.79" 
+version = "0.9.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5454462c0eced1e97f2ec09036abc8da362e66802f66fd20f86854d9d8cbcbc4" +checksum = "23bbbf7854cd45b83958ebe919f0e8e516793727652e27fda10a8384cfc790b7" dependencies = [ "autocfg 1.1.0", "cc", @@ -5438,14 +5447,14 @@ dependencies = [ "libc", "redox_syscall", "smallvec", - "windows-sys 0.42.0", + "windows-sys", ] [[package]] name = "paste" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf1c2c742266c2f1041c914ba65355a83ae8747b05f208319784083583494b4b" +checksum = "d01a5bd0424d00070b0098dd17ebca6f961a959dead1dbcbbbc1d1cd8d3deeba" [[package]] name = "pbkdf2" @@ -5497,9 +5506,9 @@ checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" [[package]] name = "pest" -version = "2.5.1" +version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc8bed3549e0f9b0a2a78bf7c0018237a2cdf085eecbbc048e52612438e4e9d0" +checksum = "0f6e86fb9e7026527a0d46bc308b841d73170ef8f443e1807f6ef88526a816d4" dependencies = [ "thiserror", "ucd-trie", @@ -5630,7 +5639,7 @@ dependencies = [ "libc", "log", "wepoll-ffi", - "windows-sys 0.42.0", + "windows-sys", ] [[package]] @@ -5675,9 +5684,9 @@ checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "prettyplease" -version = "0.1.21" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c142c0e46b57171fe0c528bee8c5b7569e80f0c17e377cd0e30ea57dbc11bb51" +checksum = "e97e3215779627f01ee256d2fad52f3d95e8e1c11e9fc6fd08f7cd455d5d5c78" dependencies = [ "proc-macro2", "syn", @@ -5746,15 +5755,15 @@ dependencies = [ [[package]] name = "proc-macro-hack" -version = "0.5.19" +version = "0.5.20+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" +checksum = 
"dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" [[package]] name = "proc-macro2" -version = "1.0.47" +version = "1.0.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ea3d908b0e36316caf9e9e2c4625cdde190a7e6f440d794667ed17a1855e725" +checksum = "57a8eca9f9c4ffde41714334dee777596264c7825420f521abc92b5b5deb63a5" dependencies = [ "unicode-ident", ] @@ -5793,7 +5802,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83cd1b99916654a69008fd66b4f9397fbe08e6e51dfe23d4417acf5d3b8cb87c" dependencies = [ "dtoa", - "itoa 1.0.4", + "itoa 1.0.5", "parking_lot 0.12.1", "prometheus-client-derive-text-encode", ] @@ -5811,9 +5820,9 @@ dependencies = [ [[package]] name = "prost" -version = "0.11.3" +version = "0.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0b18e655c21ff5ac2084a5ad0611e827b3f92badf79f4910b5a5c58f4d87ff0" +checksum = "c01db6702aa05baa3f57dec92b8eeeeb4cb19e894e73996b32a4093289e54592" dependencies = [ "bytes", "prost-derive", @@ -5821,9 +5830,9 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.11.4" +version = "0.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "276470f7f281b0ed53d2ae42dd52b4a8d08853a3c70e7fe95882acbb98a6ae94" +checksum = "cb5320c680de74ba083512704acb90fe00f28f79207286a848e730c45dd73ed6" dependencies = [ "bytes", "heck", @@ -5856,9 +5865,9 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.11.2" +version = "0.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "164ae68b6587001ca506d3bf7f1000bfa248d0e1217b618108fba4ec1d0cc306" +checksum = "c8842bad1a5419bca14eac663ba798f6bc19c413c2fdceb5f3ba3b0932d96720" dependencies = [ "anyhow", "itertools", @@ -5869,9 +5878,9 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.11.2" +version = "0.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"747761bc3dc48f9a34553bf65605cf6cb6288ba219f3450b4275dbd81539551a" +checksum = "017f79637768cde62820bc2d4fe0e45daaa027755c323ad077767c6c5f173091" dependencies = [ "bytes", "prost", @@ -5975,9 +5984,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.21" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179" +checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b" dependencies = [ "proc-macro2", ] @@ -6164,9 +6173,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.7.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a" +checksum = "48aaa5748ba571fb95cd2c85c09f629215d3a6ece942baa100950af03a34f733" dependencies = [ "aho-corasick", "memchr", @@ -6315,9 +6324,9 @@ dependencies = [ [[package]] name = "rtcp" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c11171e7e37998dcf54a9e9d4a6e2e1932c994424c7d39bc6349fed1424c45c3" +checksum = "1919efd6d4a6a85d13388f9487549bb8e359f17198cc03ffd72f79b553873691" dependencies = [ "bytes", "thiserror", @@ -6410,7 +6419,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.14", + "semver 1.0.16", ] [[package]] @@ -6419,7 +6428,7 @@ version = "4.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "faf0c4a6ece9950b9abdb62b1cfcf2a68b3b67a10ba445b3bb85be2a293d0632" dependencies = [ - "nom 7.1.1", + "nom 7.1.2", ] [[package]] @@ -6458,9 +6467,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.9" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"97477e48b4cf8603ad5f7aaf897467cf42ab4218a38ef76fb14c2d6773a6d6a8" +checksum = "5583e89e108996506031660fe09baa5011b9dd0341b89029313006d1fb508d70" [[package]] name = "rw-stream-sink" @@ -6475,9 +6484,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09" +checksum = "7b4b9743ed687d4b4bcedf9ff5eaa7398495ae14e61cba0a295704edbc7decde" [[package]] name = "safe_arith" @@ -6533,12 +6542,11 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.20" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d6731146462ea25d9244b2ed5fd1d716d25c52e4d54aa4fb0f3c4e9854dbe2" +checksum = "713cfb06c7059f3588fb8044c0fad1d09e3c01d225e25b9220dbfdcf16dbb1b3" dependencies = [ - "lazy_static", - "windows-sys 0.36.1", + "windows-sys", ] [[package]] @@ -6564,9 +6572,9 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "scratch" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8132065adcfd6e02db789d9285a0deb2f3fcb04002865ab67d5fb103533898" +checksum = "ddccb15bcce173023b3fedd9436f882a0739b8dfb45e4f6b6002bee5929f61b2" [[package]] name = "scrypt" @@ -6687,9 +6695,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.14" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e25dfac463d778e353db5be2449d1cce89bd6fd23c9f1ea21310ce6e5a1b29c4" +checksum = "58bc9567378fc7690d6b2addae4e60ac2eeea07becb2c64b9f218b53865cba2a" [[package]] name = "semver-parser" @@ -6722,9 +6730,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.150" +version = "1.0.152" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e326c9ec8042f1b5da33252c8a37e9ffbd2c9bef0155215b6e6c80c790e05f91" 
+checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb" dependencies = [ "serde_derive", ] @@ -6751,9 +6759,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.150" +version = "1.0.152" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42a3df25b0713732468deadad63ab9da1f1fd75a48a15024b50363f128db627e" +checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e" dependencies = [ "proc-macro2", "quote", @@ -6762,20 +6770,20 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.89" +version = "1.0.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "020ff22c755c2ed3f8cf162dbb41a7268d934702f3ed3631656ea597e08fc3db" +checksum = "877c235533714907a8c2464236f5c4b2a17262ef1bd71f38f35ea592c8da6883" dependencies = [ - "itoa 1.0.4", + "itoa 1.0.5", "ryu", "serde", ] [[package]] name = "serde_repr" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fe39d9fbb0ebf5eb2c7cb7e2a47e4f462fad1379f1166b8ae49ad9eae89a7ca" +checksum = "9a5ec9fa74a20ebbe5d9ac23dac1fc96ba0ecfe9f50f2843b52e537b10fbcb4e" dependencies = [ "proc-macro2", "quote", @@ -6789,7 +6797,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ "form_urlencoded", - "itoa 1.0.4", + "itoa 1.0.5", "ryu", "serde", ] @@ -7419,9 +7427,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.105" +version = "1.0.107" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60b9b43d45702de4c839cb9b51d9f529c5dd26a4aff255b42b1ebc03e88ee908" +checksum = "1f4064b5b16e03ae50984a5a8ed5d4f8803e6bc1fd170a3cda91a1be4b18e3f5" dependencies = [ "proc-macro2", "quote", @@ -7594,18 +7602,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.37" +version = "1.0.38" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "10deb33631e3c9018b9baf9dcbbc4f737320d2b576bac10f6aefa048fa407e3e" +checksum = "6a9cd18aa97d5c45c6603caea1da6628790b37f7a34b6ca89522331c5180fed0" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "982d17546b47146b28f7c22e3d08465f6b8903d0ea13c1660d9d84a6e7adcdbb" +checksum = "1fb327af4685e4d03fa8cbcf1716380da910eeb2bb8be417e7f9fd3fb164f36f" dependencies = [ "proc-macro2", "quote", @@ -7647,7 +7655,7 @@ version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a561bf4617eebd33bca6434b988f39ed798e527f51a1e797d0ee4f61c0a38376" dependencies = [ - "itoa 1.0.4", + "itoa 1.0.5", "libc", "num_threads", "serde", @@ -7736,9 +7744,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.23.0" +version = "1.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eab6d665857cc6ca78d6e80303a02cea7a7851e85dfbd77cbdc09bd129f1ef46" +checksum = "1d9f76183f91ecfb55e1d7d5602bd1d979e38a3a522fe900241cf195624d67ae" dependencies = [ "autocfg 1.1.0", "bytes", @@ -7751,7 +7759,7 @@ dependencies = [ "signal-hook-registry", "socket2", "tokio-macros", - "windows-sys 0.42.0", + "windows-sys", ] [[package]] @@ -8109,9 +8117,9 @@ dependencies = [ [[package]] name = "try-lock" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" +checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" [[package]] name = "tungstenite" @@ -8280,9 +8288,9 @@ checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" [[package]] name = "unicode-ident" -version = "1.0.5" +version = "1.0.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3" +checksum = "84a22b9f218b40614adcb3f4ff08b703773ad44fa9423e4e0d346d5db86e4ebc" [[package]] name = "unicode-normalization" @@ -8924,9 +8932,9 @@ dependencies = [ [[package]] name = "webrtc-mdns" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2548ae970afc2ae8e0b0dbfdacd9602d426d4f0ff6cda4602a45c0fd7ceaa82a" +checksum = "f08dfd7a6e3987e255c4dbe710dde5d94d0f0574f8a21afa95d171376c143106" dependencies = [ "log", "socket2", @@ -9101,19 +9109,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "windows-sys" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" -dependencies = [ - "windows_aarch64_msvc 0.36.1", - "windows_i686_gnu 0.36.1", - "windows_i686_msvc 0.36.1", - "windows_x86_64_gnu 0.36.1", - "windows_x86_64_msvc 0.36.1", -] - [[package]] name = "windows-sys" version = "0.42.0" @@ -9141,12 +9136,6 @@ version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "17cffbe740121affb56fad0fc0e421804adf0ae00891205213b5cecd30db881d" -[[package]] -name = "windows_aarch64_msvc" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" - [[package]] name = "windows_aarch64_msvc" version = "0.42.0" @@ -9159,12 +9148,6 @@ version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2564fde759adb79129d9b4f54be42b32c89970c18ebf93124ca8870a498688ed" -[[package]] -name = "windows_i686_gnu" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" - [[package]] name = "windows_i686_gnu" 
version = "0.42.0" @@ -9177,12 +9160,6 @@ version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9cd9d32ba70453522332c14d38814bceeb747d80b3958676007acadd7e166956" -[[package]] -name = "windows_i686_msvc" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" - [[package]] name = "windows_i686_msvc" version = "0.42.0" @@ -9195,12 +9172,6 @@ version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfce6deae227ee8d356d19effc141a509cc503dfd1f850622ec4b0f84428e1f4" -[[package]] -name = "windows_x86_64_gnu" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" - [[package]] name = "windows_x86_64_gnu" version = "0.42.0" @@ -9219,12 +9190,6 @@ version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d19538ccc21819d01deaf88d6a17eae6596a12e9aafdbb97916fb49896d89de9" -[[package]] -name = "windows_x86_64_msvc" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" - [[package]] name = "windows_x86_64_msvc" version = "0.42.0" @@ -9306,7 +9271,7 @@ dependencies = [ "data-encoding", "der-parser 7.0.0", "lazy_static", - "nom 7.1.1", + "nom 7.1.2", "oid-registry 0.4.0", "ring", "rusticata-macros", @@ -9325,7 +9290,7 @@ dependencies = [ "data-encoding", "der-parser 8.1.0", "lazy_static", - "nom 7.1.1", + "nom 7.1.2", "oid-registry 0.6.1", "rusticata-macros", "thiserror", From 38514c07f222ff7783834c48cf5c0a6ee7f346d0 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 11 Jan 2023 03:27:08 +0000 Subject: [PATCH 20/23] Release v3.4.0 (#3862) ## Issue Addressed NA ## Proposed Changes Bump versions ## Additional Info - [x] ~~Blocked on #3728, 
#3801~~ - [x] ~~Blocked on #3866~~ - [x] Requires additional testing --- Cargo.lock | 8 ++++---- beacon_node/Cargo.toml | 2 +- boot_node/Cargo.toml | 2 +- common/lighthouse_version/src/lib.rs | 4 ++-- lcli/Cargo.toml | 2 +- lighthouse/Cargo.toml | 2 +- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d1329d74602..f1daf4dbdfb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -612,7 +612,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "3.3.0" +version = "3.4.0" dependencies = [ "beacon_chain", "clap", @@ -780,7 +780,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "3.3.0" +version = "3.4.0" dependencies = [ "beacon_node", "clap", @@ -3682,7 +3682,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "3.3.0" +version = "3.4.0" dependencies = [ "account_utils", "beacon_chain", @@ -4285,7 +4285,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "3.3.0" +version = "3.4.0" dependencies = [ "account_manager", "account_utils", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index d47f77da930..cca8cc969ef 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "3.3.0" +version = "3.4.0" authors = ["Paul Hauner ", "Age Manning "] edition = "2021" diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index afcbae513b4..3c136b18b92 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v3.3.0-", - fallback = "Lighthouse/v3.3.0" + prefix = "Lighthouse/v3.4.0-", + fallback = "Lighthouse/v3.4.0" ); /// Returns `VERSION`, but with platform information appended to the end. 
diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 638ab46bfbf..93e529755d7 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "3.3.0" +version = "3.4.0" authors = ["Paul Hauner "] edition = "2021" diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index b0c0580b084..f9597ade8d4 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "3.3.0" +version = "3.4.0" authors = ["Sigma Prime "] edition = "2021" autotests = false From 4d9e137e6ad4e738a976164d393f2e0b4896a00f Mon Sep 17 00:00:00 2001 From: Madman600 <38760981+Madman600@users.noreply.github.com> Date: Mon, 16 Jan 2023 03:42:08 +0000 Subject: [PATCH 21/23] Update checkpoint-sync.md (#3831) Remove infura checkpoint sync instructions. Co-authored-by: Adam Patacchiola --- book/src/checkpoint-sync.md | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/book/src/checkpoint-sync.md b/book/src/checkpoint-sync.md index 736aa08f1cf..893c545cb93 100644 --- a/book/src/checkpoint-sync.md +++ b/book/src/checkpoint-sync.md @@ -48,17 +48,6 @@ The Ethereum community provides various [public endpoints](https://eth-clients.g lighthouse bn --checkpoint-sync-url https://example.com/ ... ``` -### Use Infura as a remote beacon node provider - -You can use Infura as the remote beacon node provider to load the initial checkpoint state. - -1. Sign up for the free Infura ETH2 API using the `Create new project tab` on the [Infura dashboard](https://infura.io/dashboard). -2. Copy the HTTPS endpoint for the required network (Mainnet/Prater). -3. Use it as the url for the `--checkpoint-sync-url` flag. e.g. -``` -lighthouse bn --checkpoint-sync-url https://:@eth2-beacon-mainnet.infura.io ... 
-``` - ## Backfilling Blocks Once forwards sync completes, Lighthouse will commence a "backfill sync" to download the blocks From 912ea2a5cab0ba8c4a9ac9fd4f3717e50a332301 Mon Sep 17 00:00:00 2001 From: Santiago Medina Date: Mon, 16 Jan 2023 03:42:09 +0000 Subject: [PATCH 22/23] Return HTTP 404 rather than 405 (#3836) ## Issue Addressed Issue #3112 ## Proposed Changes Add `Filter::recover` to the GET chain to handle rejections specifically as 404 NOT FOUND ## Additional Info Making a request to `http://localhost:5052/not_real` now returns the following: ``` { "code": 404, "message": "NOT_FOUND", "stacktraces": [] } ``` Co-authored-by: Paul Hauner --- beacon_node/http_api/src/lib.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 6cfdaf5db6a..8cd0b856b51 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -3383,7 +3383,8 @@ pub fn serve( .or(get_lighthouse_attestation_performance.boxed()) .or(get_lighthouse_block_packing_efficiency.boxed()) .or(get_lighthouse_merge_readiness.boxed()) - .or(get_events.boxed()), + .or(get_events.boxed()) + .recover(warp_utils::reject::handle_rejection), ) .boxed() .or(warp::post().and( @@ -3407,7 +3408,8 @@ pub fn serve( .or(post_lighthouse_database_reconstruct.boxed()) .or(post_lighthouse_database_historical_blocks.boxed()) .or(post_lighthouse_block_rewards.boxed()) - .or(post_lighthouse_ui_validator_metrics.boxed()), + .or(post_lighthouse_ui_validator_metrics.boxed()) + .recover(warp_utils::reject::handle_rejection), )) .recover(warp_utils::reject::handle_rejection) .with(slog_logging(log.clone())) From 6ac1c5b43951f26f18df8e0b7553fa93c30e0250 Mon Sep 17 00:00:00 2001 From: Mac L Date: Mon, 16 Jan 2023 03:42:10 +0000 Subject: [PATCH 23/23] Add CLI flag to specify the format of logs written to the logfile (#3839) ## Proposed Changes Decouple the stdout and logfile formats by adding the `--logfile-format` 
CLI flag. This behaves identically to the existing `--log-format` flag, but instead will only affect the logs written to the logfile. The `--log-format` flag will no longer have any effect on the contents of the logfile. ## Additional Info This avoids being a breaking change by causing `logfile-format` to default to the value of `--log-format` if it is not provided. This means that users who were previously relying on being able to use a JSON formatted logfile will be able to continue to use `--log-format JSON`. Users who want to use JSON on stdout and default logs in the logfile, will need to pass the following flags: `--log-format JSON --logfile-format DEFAULT` --- lcli/src/main.rs | 1 + lighthouse/environment/src/lib.rs | 4 +++- lighthouse/src/main.rs | 15 +++++++++++++++ lighthouse/tests/beacon_node.rs | 19 ++++++++++++++++++- testing/simulator/src/eth1_sim.rs | 1 + testing/simulator/src/no_eth1_sim.rs | 1 + testing/simulator/src/sync_sim.rs | 1 + 7 files changed, 40 insertions(+), 2 deletions(-) diff --git a/lcli/src/main.rs b/lcli/src/main.rs index de6039f35a0..137a4534b46 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -792,6 +792,7 @@ fn run( debug_level: String::from("trace"), logfile_debug_level: String::from("trace"), log_format: None, + logfile_format: None, log_color: false, disable_log_timestamp: false, max_log_size: 0, diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index fad7edeb196..8ef67e82ddb 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -50,6 +50,7 @@ pub struct LoggerConfig { pub debug_level: String, pub logfile_debug_level: String, pub log_format: Option, + pub logfile_format: Option, pub log_color: bool, pub disable_log_timestamp: bool, pub max_log_size: u64, @@ -64,6 +65,7 @@ impl Default for LoggerConfig { debug_level: String::from("info"), logfile_debug_level: String::from("debug"), log_format: None, + logfile_format: None, log_color: false, 
disable_log_timestamp: false, max_log_size: 200, @@ -252,7 +254,7 @@ impl EnvironmentBuilder { let file_logger = FileLoggerBuilder::new(&path) .level(logfile_level) .channel_size(LOG_CHANNEL_SIZE) - .format(match config.log_format.as_deref() { + .format(match config.logfile_format.as_deref() { Some("JSON") => Format::Json, _ => Format::default(), }) diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index da72204f967..64ee0432f8a 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -99,6 +99,15 @@ fn main() { .default_value("debug") .global(true), ) + .arg( + Arg::with_name("logfile-format") + .long("logfile-format") + .value_name("FORMAT") + .help("Specifies the log format used when emitting logs to the logfile.") + .possible_values(&["DEFAULT", "JSON"]) + .takes_value(true) + .global(true) + ) .arg( Arg::with_name("logfile-max-size") .long("logfile-max-size") @@ -402,6 +411,11 @@ fn run( .value_of("logfile-debug-level") .ok_or("Expected --logfile-debug-level flag")?; + let logfile_format = matches + .value_of("logfile-format") + // Ensure that `logfile-format` defaults to the value of `log-format`. + .or_else(|| matches.value_of("log-format")); + let logfile_max_size: u64 = matches .value_of("logfile-max-size") .ok_or("Expected --logfile-max-size flag")? 
@@ -452,6 +466,7 @@ fn run( debug_level: String::from(debug_level), logfile_debug_level: String::from(logfile_debug_level), log_format: log_format.map(String::from), + logfile_format: logfile_format.map(String::from), log_color, disable_log_timestamp, max_log_size: logfile_max_size * 1_024 * 1_024, diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 4a2e160e8bc..7e581ee6152 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -1662,7 +1662,24 @@ fn logfile_no_restricted_perms_flag() { assert!(config.logger_config.is_restricted == false); }); } - +#[test] +fn logfile_format_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| assert_eq!(config.logger_config.logfile_format, None)); +} +#[test] +fn logfile_format_flag() { + CommandLineTest::new() + .flag("logfile-format", Some("JSON")) + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.logger_config.logfile_format, + Some("JSON".to_string()) + ) + }); +} #[test] fn sync_eth1_chain_default() { CommandLineTest::new() diff --git a/testing/simulator/src/eth1_sim.rs b/testing/simulator/src/eth1_sim.rs index 8284bff6096..42aefea7a53 100644 --- a/testing/simulator/src/eth1_sim.rs +++ b/testing/simulator/src/eth1_sim.rs @@ -62,6 +62,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { debug_level: String::from("debug"), logfile_debug_level: String::from("debug"), log_format: None, + logfile_format: None, log_color: false, disable_log_timestamp: false, max_log_size: 0, diff --git a/testing/simulator/src/no_eth1_sim.rs b/testing/simulator/src/no_eth1_sim.rs index 53c4447da2c..1a026ded46d 100644 --- a/testing/simulator/src/no_eth1_sim.rs +++ b/testing/simulator/src/no_eth1_sim.rs @@ -47,6 +47,7 @@ pub fn run_no_eth1_sim(matches: &ArgMatches) -> Result<(), String> { debug_level: String::from("debug"), logfile_debug_level: String::from("debug"), log_format: None, + logfile_format: None, 
log_color: false, disable_log_timestamp: false, max_log_size: 0, diff --git a/testing/simulator/src/sync_sim.rs b/testing/simulator/src/sync_sim.rs index 1c8b41f0573..9d759715eba 100644 --- a/testing/simulator/src/sync_sim.rs +++ b/testing/simulator/src/sync_sim.rs @@ -51,6 +51,7 @@ fn syncing_sim( debug_level: String::from(log_level), logfile_debug_level: String::from("debug"), log_format: log_format.map(String::from), + logfile_format: None, log_color: false, disable_log_timestamp: false, max_log_size: 0,