diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index ef23e1ed573..a408fcdd52f 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -45,8 +45,8 @@ questions. 2. **Work in a feature branch** of your personal fork (github.com/YOUR_NAME/lighthouse) of the main repository (github.com/sigp/lighthouse). -3. Once you feel you have addressed the issue, **create a pull-request** to merge - your changes into the main repository. +3. Once you feel you have addressed the issue, **create a pull-request** with + `unstable` as the base branch to merge your changes into the main repository. 4. Wait for the repository maintainers to **review your changes** to ensure the issue is addressed satisfactorily. Optionally, mention your PR on [discord](https://discord.gg/cyAszAh). diff --git a/Cargo.lock b/Cargo.lock index a8f6cfe9818..9a462f6c044 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -423,13 +423,14 @@ checksum = "1181e1e0d1fce796a03db1ae795d67167da795f9cf4a39c37589e85ef57f26d3" [[package]] name = "attohttpc" -version = "0.10.1" +version = "0.16.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf13118df3e3dce4b5ac930641343b91b656e4e72c8f8325838b01a4b1c9d45" +checksum = "fdb8867f378f33f78a811a8eb9bf108ad99430d7aad43315dd9319c827ef6247" dependencies = [ "http", "log", "url", + "wildmatch", ] [[package]] @@ -3521,13 +3522,13 @@ dependencies = [ [[package]] name = "igd" -version = "0.11.1" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fd32c880165b2f776af0b38d206d1cabaebcf46c166ac6ae004a5d45f7d48ef" +checksum = "556b5a75cd4adb7c4ea21c64af1c48cefb2ce7d43dc4352c720a1fe47c21f355" dependencies = [ "attohttpc", "log", - "rand 0.7.3", + "rand 0.8.5", "url", "xmltree", ] [[package]] @@ -9496,6 +9497,12 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "17882f045410753661207383517a6f62ec3dbeb6a4ed2acce01f0728238d1983" +[[package]] +name = "wildmatch" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f44b95f62d34113cf558c93511ac93027e03e9c29a60dd0fd70e6e025c7270a" + [[package]] name = "winapi" version = "0.3.9" diff --git a/Dockerfile b/Dockerfile index 0d268c7e1aa..be01ad7c572 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,7 +2,9 @@ FROM rust:1.68.2-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler COPY . lighthouse ARG FEATURES +ARG PROFILE=release ENV FEATURES $FEATURES +ENV PROFILE $PROFILE RUN cd lighthouse && make FROM ubuntu:22.04 diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index ca0c5ce15b7..49630096cee 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -420,6 +420,8 @@ pub struct BeaconChain { pub slasher: Option>>, /// Provides monitoring of a set of explicitly defined validators. pub validator_monitor: RwLock>, + /// The slot to which blocks are backfilled during sync.
+ pub genesis_backfill_slot: Slot, } type BeaconBlockAndState = (BeaconBlock, BeaconState); diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 56006b4d622..ca377635d66 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -772,6 +772,29 @@ where let canonical_head = CanonicalHead::new(fork_choice, Arc::new(head_snapshot)); let shuffling_cache_size = self.chain_config.shuffling_cache_size; + // Calculate the weak subjectivity point to which blocks should be backfilled. + let genesis_backfill_slot = if self.chain_config.genesis_backfill { + Slot::new(0) + } else { + let backfill_epoch_range = (self.spec.min_validator_withdrawability_delay + + self.spec.churn_limit_quotient) + .as_u64() + / 2; + match slot_clock.now() { + Some(current_slot) => { + let genesis_backfill_epoch = current_slot + .epoch(TEthSpec::slots_per_epoch()) + .saturating_sub(backfill_epoch_range); + genesis_backfill_epoch.start_slot(TEthSpec::slots_per_epoch()) + } + None => { + // The slot clock cannot derive the current slot. We therefore assume we are + // at or prior to genesis and backfill should sync all the way to genesis. + Slot::new(0) + } + } + }; + let beacon_chain = BeaconChain { spec: self.spec, config: self.chain_config, @@ -839,6 +862,7 @@ where graffiti: self.graffiti, slasher: self.slasher.clone(), validator_monitor: RwLock::new(validator_monitor), + genesis_backfill_slot, }; let head = beacon_chain.head_snapshot(); diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index 9921435313d..a74fdced1f6 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -73,6 +73,9 @@ pub struct ChainConfig { pub optimistic_finalized_sync: bool, /// The size of the shuffling cache, pub shuffling_cache_size: usize, + /// If using a weak-subjectivity sync, whether we should download blocks all the way back to + /// genesis. + pub genesis_backfill: bool, /// Whether to send payload attributes every slot, regardless of connected proposers. /// /// This is useful for block builders and testing. @@ -106,6 +109,7 @@ impl Default for ChainConfig { // This value isn't actually read except in tests. optimistic_finalized_sync: true, shuffling_cache_size: crate::shuffling_cache::DEFAULT_CACHE_SIZE, + genesis_backfill: false, always_prepare_payload: false, enable_backfill_rate_limiting: true, } } diff --git a/beacon_node/beacon_chain/src/historical_blocks.rs b/beacon_node/beacon_chain/src/historical_blocks.rs index cc45a6bb9a9..5f590735004 100644 --- a/beacon_node/beacon_chain/src/historical_blocks.rs +++ b/beacon_node/beacon_chain/src/historical_blocks.rs @@ -189,13 +189,17 @@ impl BeaconChain { oldest_block_parent: expected_block_root, ..anchor_info }; - let backfill_complete = new_anchor.block_backfill_complete(); + let backfill_complete = new_anchor.block_backfill_complete(self.genesis_backfill_slot); self.store .compare_and_set_anchor_info_with_write(Some(anchor_info), Some(new_anchor))?; // If backfill has completed and the chain is configured to reconstruct historic states, // send a message to the background migrator instructing it to begin reconstruction. - if backfill_complete && self.config.reconstruct_historic_states { + // This can only happen if we have backfilled all the way to genesis.
+ if backfill_complete + && self.genesis_backfill_slot == Slot::new(0) + && self.config.reconstruct_historic_states + { self.store_migrator.process_reconstruction(); } diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 3c5d1fd3b1a..7ade38385a9 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -83,7 +83,7 @@ pub type AddBlocksResult = ( BeaconState, ); -/// Deprecated: Indicates how the `BeaconChainHarness` should produce blocks. +/// Indicates how the `BeaconChainHarness` should produce blocks. #[derive(Clone, Copy, Debug)] pub enum BlockStrategy { /// Produce blocks upon the canonical head (normal case). @@ -99,7 +99,7 @@ pub enum BlockStrategy { }, } -/// Deprecated: Indicates how the `BeaconChainHarness` should produce attestations. +/// Indicates how the `BeaconChainHarness` should produce attestations. #[derive(Clone, Debug)] pub enum AttestationStrategy { /// All validators attest to whichever block the `BeaconChainHarness` has produced. @@ -732,6 +732,7 @@ where state.get_block_root(slot).unwrap() == state.get_block_root(slot - 1).unwrap() } + /// Returns a newly created block, signed by the proposer for the given slot. pub async fn make_block( &self, mut state: BeaconState, @@ -1095,8 +1096,6 @@ where .collect() } - /// Deprecated: Use make_unaggregated_attestations() instead. - /// /// A list of attestations for each committee for the given slot. /// /// The first layer of the Vec is organised per committee. For example, if the return value is @@ -2001,9 +2000,6 @@ where .collect() } - /// Deprecated: Do not modify the slot clock manually; rely on add_attested_blocks_at_slots() - /// instead - /// /// Advance the slot of the `BeaconChain`. /// /// Does not produce blocks or attestations. @@ -2017,18 +2013,6 @@ where self.chain.slot_clock.set_current_time(time); } - /// Deprecated: Use make_block() instead - /// - /// Returns a newly created block, signed by the proposer for the given slot. - pub async fn build_block( - &self, - state: BeaconState, - slot: Slot, - _block_strategy: BlockStrategy, - ) -> (SignedBeaconBlock, BeaconState) { - self.make_block(state, slot).await - } - /// Uses `Self::extend_chain` to build the chain out to the `target_slot`. pub async fn extend_to_slot(&self, target_slot: Slot) -> Hash256 { if self.chain.slot().unwrap() == self.chain.canonical_head.cached_head().head_slot() { @@ -2064,8 +2048,6 @@ where .await } - /// Deprecated: Use add_attested_blocks_at_slots() instead - /// /// Extend the `BeaconChain` with some blocks and attestations. Returns the root of the /// last-produced block (the head of the chain). 
/// diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 64fa5b79d60..215244b9bad 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -250,6 +250,12 @@ where genesis_state_bytes, } => { info!(context.log(), "Starting checkpoint sync"); + if config.chain.genesis_backfill { + info!( + context.log(), + "Blocks will be downloaded all the way back to genesis" + ); + } let anchor_state = BeaconState::from_ssz_bytes(&anchor_state_bytes, &spec) .map_err(|e| format!("Unable to parse weak subj state SSZ: {:?}", e))?; @@ -271,6 +277,12 @@ where "Starting checkpoint sync"; "remote_url" => %url, ); + if config.chain.genesis_backfill { + info!( + context.log(), + "Blocks will be downloaded all the way back to genesis" + ); + } let remote = BeaconNodeHttpClient::new( url, diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 1105bc41f67..1ff469fe300 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -142,7 +142,8 @@ pub fn spawn_notifier( .get_anchor_info() .map(|ai| ai.oldest_block_slot) { - sync_distance = current_anchor_slot; + sync_distance = current_anchor_slot + .saturating_sub(beacon_chain.genesis_backfill_slot); speedo // For backfill sync use a fake slot which is the distance we've progressed from the starting `oldest_block_slot`. .observe( @@ -207,14 +208,14 @@ pub fn spawn_notifier( "Downloading historical blocks"; "distance" => distance, "speed" => sync_speed_pretty(speed), - "est_time" => estimated_time_pretty(speedo.estimated_time_till_slot(original_anchor_slot.unwrap_or(current_slot))), + "est_time" => estimated_time_pretty(speedo.estimated_time_till_slot(original_anchor_slot.unwrap_or(current_slot).saturating_sub(beacon_chain.genesis_backfill_slot))), ); } else { info!( log, "Downloading historical blocks"; "distance" => distance, - "est_time" => estimated_time_pretty(speedo.estimated_time_till_slot(original_anchor_slot.unwrap_or(current_slot))), + "est_time" => estimated_time_pretty(speedo.estimated_time_till_slot(original_anchor_slot.unwrap_or(current_slot).saturating_sub(beacon_chain.genesis_backfill_slot))), ); } } else if !is_backfilling && last_backfill_log_slot.is_some() { diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index a461a12e530..b2096013bfe 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -940,6 +940,10 @@ impl PeerManager { /// MIN_SYNC_COMMITTEE_PEERS /// number should be set low as an absolute lower bound to maintain peers on the sync /// committees. + /// - Do not prune trusted peers. NOTE: This means if a user has more trusted peers than the + /// excess peer limit, all of the following logic is subverted as we will not prune any peers. + /// Also, the more trusted peers a user has, the less room Lighthouse has to efficiently manage + /// its peers across the subnets. /// /// Prune peers in the following order: /// 1.
Remove worst scoring peers @@ -970,7 +974,9 @@ impl PeerManager { .read() .worst_connected_peers() .iter() - .filter(|(_, info)| !info.has_future_duty() && $filter(*info)) + .filter(|(_, info)| { + !info.has_future_duty() && !info.is_trusted() && $filter(*info) + }) { if peers_to_prune.len() >= connected_peer_count.saturating_sub(self.target_peers) @@ -1020,8 +1026,8 @@ impl PeerManager { > = HashMap::new(); for (peer_id, info) in self.network_globals.peers.read().connected_peers() { - // Ignore peers we are already pruning - if peers_to_prune.contains(peer_id) { + // Ignore peers we trust or that we are already pruning + if info.is_trusted() || peers_to_prune.contains(peer_id) { continue; } @@ -1318,25 +1324,47 @@ mod tests { ..Default::default() }; let log = build_log(slog::Level::Debug, false); - let globals = NetworkGlobals::new_test_globals(&log); + let globals = NetworkGlobals::new_test_globals(vec![], &log); + PeerManager::new(config, Arc::new(globals), &log).unwrap() + } + + async fn build_peer_manager_with_trusted_peers( + trusted_peers: Vec, + target_peer_count: usize, + ) -> PeerManager { + let config = config::Config { + target_peer_count, + discovery_enabled: false, + ..Default::default() }; + let log = build_log(slog::Level::Debug, false); + let globals = NetworkGlobals::new_test_globals(trusted_peers, &log); PeerManager::new(config, Arc::new(globals), &log).unwrap() } #[tokio::test] async fn test_peer_manager_disconnects_correctly_during_heartbeat() { - let mut peer_manager = build_peer_manager(3).await; - - // Create 5 peers to connect to. + // Create 6 peers to connect to with a target of 3. // 2 will be outbound-only, and have the lowest score. + // 1 will be a trusted peer. + // The other 3 will be ingoing peers. + + // We expect this test to disconnect 3 peers: 1 of the outbound-only peers (the other must + // remain due to the outbound peer limit) and 2 of the ingoing peers (the trusted peer + // should remain connected). let peer0 = PeerId::random(); let peer1 = PeerId::random(); let peer2 = PeerId::random(); let outbound_only_peer1 = PeerId::random(); let outbound_only_peer2 = PeerId::random(); + let trusted_peer = PeerId::random(); + + let mut peer_manager = build_peer_manager_with_trusted_peers(vec![trusted_peer], 3).await; peer_manager.inject_connect_ingoing(&peer0, "/ip4/0.0.0.0".parse().unwrap(), None); peer_manager.inject_connect_ingoing(&peer1, "/ip4/0.0.0.0".parse().unwrap(), None); peer_manager.inject_connect_ingoing(&peer2, "/ip4/0.0.0.0".parse().unwrap(), None); + peer_manager.inject_connect_ingoing(&trusted_peer, "/ip4/0.0.0.0".parse().unwrap(), None); peer_manager.inject_connect_outgoing( &outbound_only_peer1, "/ip4/0.0.0.0".parse().unwrap(), @@ -1366,7 +1394,7 @@ mod tests { .add_to_score(-2.0); // Check initial connected peers. - assert_eq!(peer_manager.network_globals.connected_or_dialing_peers(), 5); + assert_eq!(peer_manager.network_globals.connected_or_dialing_peers(), 6); peer_manager.heartbeat(); @@ -1385,8 +1413,22 @@ mod tests { .read() .is_connected(&outbound_only_peer2)); + // The trusted peer remains connected + assert!(peer_manager + .network_globals + .peers + .read() + .is_connected(&trusted_peer)); + peer_manager.heartbeat(); + // The trusted peer remains connected, even after subsequent heartbeats. + assert!(peer_manager + .network_globals + .peers + .read() + .is_connected(&trusted_peer)); + // Check that if we are at target number of peers, we do not disconnect any.
assert_eq!(peer_manager.network_globals.connected_or_dialing_peers(), 3); } @@ -2131,7 +2173,7 @@ mod tests { #[cfg(test)] mod property_based_tests { use crate::peer_manager::config::DEFAULT_TARGET_PEERS; - use crate::peer_manager::tests::build_peer_manager; + use crate::peer_manager::tests::build_peer_manager_with_trusted_peers; use crate::rpc::MetaData; use libp2p::PeerId; use quickcheck::{Arbitrary, Gen, TestResult}; @@ -2142,10 +2184,12 @@ mod tests { #[derive(Clone, Debug)] struct PeerCondition { + peer_id: PeerId, outgoing: bool, attestation_net_bitfield: Vec, sync_committee_net_bitfield: Vec, score: f64, + trusted: bool, gossipsub_score: f64, } @@ -2170,10 +2214,12 @@ mod tests { }; PeerCondition { + peer_id: PeerId::random(), outgoing: bool::arbitrary(g), attestation_net_bitfield, sync_committee_net_bitfield, score: f64::arbitrary(g), + trusted: bool::arbitrary(g), gossipsub_score: f64::arbitrary(g), } } @@ -2185,26 +2231,36 @@ mod tests { if peer_conditions.len() < target_peer_count { return TestResult::discard(); } + let trusted_peers: Vec<_> = peer_conditions + .iter() + .filter_map(|p| if p.trusted { Some(p.peer_id) } else { None }) + .collect(); + // If we have a high percentage of trusted peers, it is very difficult to reason about + // the expected results of the pruning. + if trusted_peers.len() > peer_conditions.len() / 3_usize { + return TestResult::discard(); } let rt = Runtime::new().unwrap(); rt.block_on(async move { - let mut peer_manager = build_peer_manager(target_peer_count).await; + // Build the peer manager with the collected trusted peers. + let mut peer_manager = + build_peer_manager_with_trusted_peers(trusted_peers, target_peer_count).await; // Create peers based on the randomly generated conditions. for condition in &peer_conditions { - let peer = PeerId::random(); let mut attnets = crate::types::EnrAttestationBitfield::::new(); let mut syncnets = crate::types::EnrSyncCommitteeBitfield::::new(); if condition.outgoing { peer_manager.inject_connect_outgoing( - &peer, + &condition.peer_id, "/ip4/0.0.0.0".parse().unwrap(), None, ); } else { peer_manager.inject_connect_ingoing( - &peer, + &condition.peer_id, "/ip4/0.0.0.0".parse().unwrap(), None, ); @@ -2225,22 +2281,51 @@ mod tests { }; let mut peer_db = peer_manager.network_globals.peers.write(); - let peer_info = peer_db.peer_info_mut(&peer).unwrap(); + let peer_info = peer_db.peer_info_mut(&condition.peer_id).unwrap(); peer_info.set_meta_data(MetaData::V2(metadata)); peer_info.set_gossipsub_score(condition.gossipsub_score); peer_info.add_to_score(condition.score); for subnet in peer_info.long_lived_subnets() { - peer_db.add_subscription(&peer, subnet); + peer_db.add_subscription(&condition.peer_id, subnet); } } // Perform the heartbeat. peer_manager.heartbeat(); - TestResult::from_bool( + // The expected number of connected peers is the target peer count, capped by the + // number of submitted peers. + + let expected_peer_count = target_peer_count.min(peer_conditions.len()); + // Trusted peers could make this larger however. + let no_of_trusted_peers = peer_conditions + .iter() + .filter(|condition| condition.trusted) + .count(); + let expected_peer_count = expected_peer_count.max(no_of_trusted_peers); + + let target_peer_condition = peer_manager.network_globals.connected_or_dialing_peers() - == target_peer_count.min(peer_conditions.len()), + == expected_peer_count; + + // It could be that we reach our target outbound limit and are unable to prune any + // extra, which violates the target_peer_condition.
+ let outbound_peers = peer_manager.network_globals.connected_outbound_only_peers(); + let hit_outbound_limit = outbound_peers == peer_manager.target_outbound_peers(); + + // No trusted peers should be disconnected + let trusted_peer_disconnected = peer_conditions.iter().any(|condition| { + condition.trusted + && !peer_manager + .network_globals + .peers + .read() + .is_connected(&condition.peer_id) + }); + + TestResult::from_bool( + (target_peer_condition || hit_outbound_limit) && !trusted_peer_disconnected, ) }) } diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs index 20870656883..52f0bbd9dfc 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs @@ -1062,7 +1062,7 @@ impl PeerDB { if let Some(to_drop) = self .peers .iter() - .filter(|(_, info)| info.is_disconnected()) + .filter(|(_, info)| info.is_disconnected() && !info.is_trusted()) .filter_map(|(id, info)| match info.connection_status() { PeerConnectionStatus::Disconnected { since } => Some((id, since)), _ => None, diff --git a/beacon_node/lighthouse_network/src/types/globals.rs b/beacon_node/lighthouse_network/src/types/globals.rs index 43e8ebd76a5..295616f36ba 100644 --- a/beacon_node/lighthouse_network/src/types/globals.rs +++ b/beacon_node/lighthouse_network/src/types/globals.rs @@ -129,7 +129,10 @@ impl NetworkGlobals { } /// TESTING ONLY. Build a dummy NetworkGlobals instance. - pub fn new_test_globals(log: &slog::Logger) -> NetworkGlobals { + pub fn new_test_globals( + trusted_peers: Vec, + log: &slog::Logger, + ) -> NetworkGlobals { use crate::CombinedKeyExt; let keypair = libp2p::identity::Keypair::generate_secp256k1(); let enr_key: discv5::enr::CombinedKey = @@ -144,7 +147,7 @@ impl NetworkGlobals { attnets: Default::default(), syncnets: Default::default(), }), - vec![], + trusted_peers, false, log, ) diff --git a/beacon_node/lighthouse_network/src/types/sync_state.rs b/beacon_node/lighthouse_network/src/types/sync_state.rs index 5f09aec27a7..b82e63bd9c0 100644 --- a/beacon_node/lighthouse_network/src/types/sync_state.rs +++ b/beacon_node/lighthouse_network/src/types/sync_state.rs @@ -13,7 +13,7 @@ pub enum SyncState { /// The node is undertaking a backfill sync. This occurs when a user has specified a trusted /// state. The node first syncs "forward" by downloading blocks up to the current head as /// specified by its peers. Once completed, the node enters this sync state and attempts to - /// download all required historical blocks to complete its chain. + /// download all required historical blocks. BackFillSyncing { completed: usize, remaining: usize }, /// The node has completed syncing a finalized chain and is in the process of re-evaluating /// which sync state to progress to. 
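An aside on the backfill target used throughout the changes above: the following is a self-contained sketch (not part of the diff) of how `genesis_backfill_slot` is derived in `beacon_chain/src/builder.rs` and how the relaxed completion check added to `store/src/metadata.rs` later in this diff consumes it. The preset constants are assumptions corresponding to the mainnet config, and plain `u64` values stand in for Lighthouse's `Slot`/`Epoch` types.

```rust
// Hypothetical standalone sketch; assumed mainnet preset values.
const MIN_VALIDATOR_WITHDRAWABILITY_DELAY: u64 = 256; // epochs (assumed)
const CHURN_LIMIT_QUOTIENT: u64 = 65_536; // (assumed)
const SLOTS_PER_EPOCH: u64 = 32;

/// Same formula as the builder: half the sum approximates the weak subjectivity period.
fn backfill_epoch_range() -> u64 {
    (MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT) / 2 // = 32_896 epochs
}

/// The slot blocks are downloaded back to: `backfill_epoch_range` epochs behind the
/// current epoch, saturating at genesis. `--genesis-backfill` forces slot 0.
fn genesis_backfill_slot(current_slot: u64, genesis_backfill: bool) -> u64 {
    if genesis_backfill {
        return 0;
    }
    let current_epoch = current_slot / SLOTS_PER_EPOCH;
    current_epoch.saturating_sub(backfill_epoch_range()) * SLOTS_PER_EPOCH
}

/// The relaxed completion check: previously this required `oldest_block_slot == 0`.
fn block_backfill_complete(oldest_block_slot: u64, target_slot: u64) -> bool {
    oldest_block_slot <= target_slot
}

fn main() {
    let target = genesis_backfill_slot(6_000_000, false); // hypothetical current slot
    assert_eq!(target, 4_947_328); // (187_500 - 32_896) * 32
    assert!(block_backfill_complete(target, target));
    assert!(!block_backfill_complete(target + 1, target));
    assert_eq!(genesis_backfill_slot(6_000_000, true), 0);
}
```

With these assumed presets the range works out to 32,896 epochs, i.e. roughly 146 days of blocks are retained by default instead of backfilling all the way to genesis.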
diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index fbc45364aad..a234165d113 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -35,7 +35,7 @@ lazy_static = "1.4.0" lighthouse_metrics = { path = "../../common/lighthouse_metrics" } logging = { path = "../../common/logging" } task_executor = { path = "../../common/task_executor" } -igd = "0.11.1" +igd = "0.12.1" itertools = "0.10.0" num_cpus = "1.13.0" lru_cache = { path = "../../common/lru_cache" } diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 3e86d2099f0..d630cf9c398 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -13,6 +13,7 @@ use futures::future::OptionFuture; use futures::prelude::*; use futures::StreamExt; use lighthouse_network::service::Network; +use lighthouse_network::types::GossipKind; use lighthouse_network::{prometheus_client::registry::Registry, MessageAcceptance}; use lighthouse_network::{ rpc::{GoodbyeReason, RPCResponseErrorCode}, @@ -23,7 +24,7 @@ use lighthouse_network::{ MessageId, NetworkEvent, NetworkGlobals, PeerId, }; use slog::{crit, debug, error, info, o, trace, warn}; -use std::{net::SocketAddr, pin::Pin, sync::Arc, time::Duration}; +use std::{collections::HashSet, net::SocketAddr, pin::Pin, sync::Arc, time::Duration}; use store::HotColdDB; use strum::IntoStaticStr; use task_executor::ShutdownReason; @@ -671,6 +672,10 @@ impl NetworkService { source, } => self.libp2p.goodbye_peer(&peer_id, reason, source), NetworkMessage::SubscribeCoreTopics => { + if self.subscribed_core_topics() { + return; + } + if self.shutdown_after_sync { if let Err(e) = shutdown_sender .send(ShutdownReason::Success( @@ -909,6 +914,16 @@ impl NetworkService { crit!(self.log, "Unknown new enr fork id"; "new_fork_id" => ?new_enr_fork_id); } } + + fn subscribed_core_topics(&self) -> bool { + let core_topics = core_topics_to_subscribe(self.fork_context.current_fork()); + let core_topics: HashSet<&GossipKind> = HashSet::from_iter(&core_topics); + let subscriptions = self.network_globals.gossipsub_subscriptions.read(); + let subscribed_topics: HashSet<&GossipKind> = + subscriptions.iter().map(|topic| topic.kind()).collect(); + + core_topics.is_subset(&subscribed_topics) + } } /// Returns a `Sleep` that triggers after the next change in the beacon chain fork version. diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index d36bbbc79b1..460c8b1ee92 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -159,20 +159,20 @@ impl BackFillSync { // If, for some reason, a backfill has already been completed (or we've used a trusted // genesis root), then the backfill is considered complete from the outset.
- let (state, current_start) = if let Some(anchor_info) = beacon_chain.store.get_anchor_info() - { - if anchor_info.block_backfill_complete() { - (BackFillState::Completed, Epoch::new(0)) - } else { - ( - BackFillState::Paused, - anchor_info - .oldest_block_slot - .epoch(T::EthSpec::slots_per_epoch()), - ) + let (state, current_start) = match beacon_chain.store.get_anchor_info() { + Some(anchor_info) => { + if anchor_info.block_backfill_complete(beacon_chain.genesis_backfill_slot) { + (BackFillState::Completed, Epoch::new(0)) + } else { + ( + BackFillState::Paused, + anchor_info + .oldest_block_slot + .epoch(T::EthSpec::slots_per_epoch()), + ) + } } - } else { - (BackFillState::NotRequired, Epoch::new(0)) + None => (BackFillState::NotRequired, Epoch::new(0)), }; let bfs = BackFillSync { @@ -287,6 +287,7 @@ impl BackFillSync { remaining: self .current_start .start_slot(T::EthSpec::slots_per_epoch()) + .saturating_sub(self.beacon_chain.genesis_backfill_slot) .as_usize(), }) } @@ -1097,7 +1098,12 @@ impl BackFillSync { match self.batches.entry(batch_id) { Entry::Occupied(_) => { // this batch doesn't need downloading, let this same function decide the next batch - if batch_id == 0 { + if batch_id + == self + .beacon_chain + .genesis_backfill_slot + .epoch(T::EthSpec::slots_per_epoch()) + { self.last_batch_downloaded = true; } @@ -1108,7 +1114,12 @@ impl BackFillSync { } Entry::Vacant(entry) => { entry.insert(BatchInfo::new(&batch_id, BACKFILL_EPOCHS_PER_BATCH)); - if batch_id == 0 { + if batch_id + == self + .beacon_chain + .genesis_backfill_slot + .epoch(T::EthSpec::slots_per_epoch()) + { self.last_batch_downloaded = true; } self.to_be_downloaded = self @@ -1125,7 +1136,7 @@ impl BackFillSync { /// not required. fn reset_start_epoch(&mut self) -> Result<(), ResetEpochError> { if let Some(anchor_info) = self.beacon_chain.store.get_anchor_info() { - if anchor_info.block_backfill_complete() { + if anchor_info.block_backfill_complete(self.beacon_chain.genesis_backfill_slot) { Err(ResetEpochError::SyncCompleted) } else { self.current_start = anchor_info @@ -1140,12 +1151,17 @@ impl BackFillSync { /// Checks with the beacon chain if backfill sync has completed. 
fn check_completed(&mut self) -> bool { - if self.current_start == 0 { + if self.current_start + == self + .beacon_chain + .genesis_backfill_slot + .epoch(T::EthSpec::slots_per_epoch()) + { // Check that the beacon chain agrees if let Some(anchor_info) = self.beacon_chain.store.get_anchor_info() { // Conditions that we have completed a backfill sync - if anchor_info.block_backfill_complete() { + if anchor_info.block_backfill_complete(self.beacon_chain.genesis_backfill_slot) { return true; } else { error!(self.log, "Backfill out of sync with beacon chain"); diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index 8ade622f8d9..5a70944f6cb 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -50,7 +50,7 @@ impl TestRig { }; let bl = BlockLookups::new(log.new(slog::o!("component" => "block_lookups"))); let cx = { - let globals = Arc::new(NetworkGlobals::new_test_globals(&log)); + let globals = Arc::new(NetworkGlobals::new_test_globals(Vec::new(), &log)); SyncNetworkContext::new( network_tx, globals, diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index 25314543877..0f1c00e509f 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -599,7 +599,7 @@ mod tests { log.new(o!("component" => "range")), ); let (network_tx, network_rx) = mpsc::unbounded_channel(); - let globals = Arc::new(NetworkGlobals::new_test_globals(&log)); + let globals = Arc::new(NetworkGlobals::new_test_globals(Vec::new(), &log)); let cx = SyncNetworkContext::new( network_tx, globals.clone(), diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index a578ac7ea20..633cbf0438d 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -247,6 +247,12 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .help("One or more comma-delimited trusted peer ids which always have the highest score according to the peer scoring system.") .takes_value(true), ) + .arg( + Arg::with_name("genesis-backfill") + .long("genesis-backfill") + .help("Attempts to download blocks all the way back to genesis when checkpoint syncing.") + .takes_value(false), + ) .arg( Arg::with_name("enable-private-discovery") .long("enable-private-discovery") @@ -519,6 +525,13 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .help("Specifies how many blocks the database should cache in memory [default: 5]") .takes_value(true) ) + .arg( + Arg::with_name("historic-state-cache-size") + .long("historic-state-cache-size") + .value_name("SIZE") + .help("Specifies how many states from the freezer database should be cached in memory [default: 1]") + .takes_value(true) + ) /* * Execution Layer Integration */ @@ -824,7 +837,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .arg( Arg::with_name("reconstruct-historic-states") .long("reconstruct-historic-states") - .help("After a checkpoint sync, reconstruct historic states in the database.") + .help("After a checkpoint sync, reconstruct historic states in the database.
This requires syncing all the way back to genesis.") .takes_value(false) ) .arg( diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 7cd2a627999..f05fea2db18 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -383,6 +383,12 @@ pub fn get_config( .map_err(|_| "block-cache-size is not a valid integer".to_string())?; } + if let Some(historic_state_cache_size) = cli_args.value_of("historic-state-cache-size") { + client_config.store.historic_state_cache_size = historic_state_cache_size + .parse() + .map_err(|_| "historic-state-cache-size is not a valid integer".to_string())?; + } + client_config.store.compact_on_init = cli_args.is_present("compact-db"); if let Some(compact_on_prune) = cli_args.value_of("auto-compact-db") { client_config.store.compact_on_prune = compact_on_prune @@ -502,6 +508,7 @@ pub fn get_config( if cli_args.is_present("reconstruct-historic-states") { client_config.chain.reconstruct_historic_states = true; + client_config.chain.genesis_backfill = true; } let raw_graffiti = if let Some(graffiti) = cli_args.value_of("graffiti") { @@ -774,6 +781,9 @@ pub fn get_config( client_config.chain.optimistic_finalized_sync = !cli_args.is_present("disable-optimistic-finalized-sync"); + if cli_args.is_present("genesis-backfill") { + client_config.chain.genesis_backfill = true; + } // Payload selection configs if cli_args.is_present("always-prefer-builder-payload") { client_config.always_prefer_builder_payload = true; @@ -1044,6 +1054,9 @@ pub fn set_network_config( .map_err(|_| format!("Invalid trusted peer id: {}", peer_id)) }) .collect::, _>>()?; + if config.trusted_peers.len() >= config.target_peers { + slog::warn!(log, "More trusted peers than the target peer limit. This will prevent the peer selection criteria from operating efficiently."; "target_peers" => config.target_peers, "trusted_peers" => config.trusted_peers.len()); + } } if let Some(enr_udp_port_str) = cli_args.value_of("enr-udp-port") { diff --git a/beacon_node/store/src/config.rs b/beacon_node/store/src/config.rs index 027b8152ee5..581003b4fae 100644 --- a/beacon_node/store/src/config.rs +++ b/beacon_node/store/src/config.rs @@ -7,6 +7,7 @@ use types::{EthSpec, MinimalEthSpec}; pub const PREV_DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 2048; pub const DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 8192; pub const DEFAULT_BLOCK_CACHE_SIZE: usize = 5; +pub const DEFAULT_HISTORIC_STATE_CACHE_SIZE: usize = 1; /// Database configuration parameters. #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] @@ -17,6 +18,8 @@ pub struct StoreConfig { pub slots_per_restore_point_set_explicitly: bool, /// Maximum number of blocks to store in the in-memory block cache. pub block_cache_size: usize, + /// Maximum number of states from the freezer database to store in the in-memory state cache. + pub historic_state_cache_size: usize, /// Whether to compact the database on initialization. pub compact_on_init: bool, /// Whether to compact the database during database pruning.
@@ -43,6 +46,7 @@ impl Default for StoreConfig { slots_per_restore_point: MinimalEthSpec::slots_per_historical_root() as u64, slots_per_restore_point_set_explicitly: false, block_cache_size: DEFAULT_BLOCK_CACHE_SIZE, + historic_state_cache_size: DEFAULT_HISTORIC_STATE_CACHE_SIZE, compact_on_init: false, compact_on_prune: true, prune_payloads: true, diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 02608f9a0bd..70fb22742e0 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -62,6 +62,8 @@ pub struct HotColdDB, Cold: ItemStore> { pub hot_db: Hot, /// LRU cache of deserialized blocks. Updated whenever a block is loaded. block_cache: Mutex>>, + /// LRU cache of replayed states. + state_cache: Mutex>>, /// Chain spec. pub(crate) spec: ChainSpec, /// Logger. @@ -129,6 +131,7 @@ impl HotColdDB, MemoryStore> { cold_db: MemoryStore::open(), hot_db: MemoryStore::open(), block_cache: Mutex::new(LruCache::new(config.block_cache_size)), + state_cache: Mutex::new(LruCache::new(config.historic_state_cache_size)), config, spec, log, @@ -162,6 +165,7 @@ impl HotColdDB, LevelDB> { cold_db: LevelDB::open(cold_path)?, hot_db: LevelDB::open(hot_path)?, block_cache: Mutex::new(LruCache::new(config.block_cache_size)), + state_cache: Mutex::new(LruCache::new(config.historic_state_cache_size)), config, spec, log, @@ -977,40 +981,70 @@ impl, Cold: ItemStore> HotColdDB /// Load a frozen state that lies between restore points. fn load_cold_intermediate_state(&self, slot: Slot) -> Result, Error> { + if let Some(state) = self.state_cache.lock().get(&slot) { + return Ok(state.clone()); + } + // 1. Load the restore points either side of the intermediate state. let low_restore_point_idx = slot.as_u64() / self.config.slots_per_restore_point; let high_restore_point_idx = low_restore_point_idx + 1; + // Use low restore point as the base state. + let mut low_slot: Slot = + Slot::new(low_restore_point_idx * self.config.slots_per_restore_point); + let mut low_state: Option> = None; + + // Try to get a more recent state from the cache to avoid replaying a large number of blocks. + for (s, state) in self.state_cache.lock().iter() { + if s.as_u64() / self.config.slots_per_restore_point == low_restore_point_idx + && *s < slot + && low_slot < *s + { + low_slot = *s; + low_state = Some(state.clone()); + } + } + + // If low_state is still None, use load_restore_point_by_index to load the state. + let low_state = match low_state { + Some(state) => state, + None => self.load_restore_point_by_index(low_restore_point_idx)?, + }; + // Acquire the read lock, so that the split can't change while this is happening. let split = self.split.read_recursive(); - let low_restore_point = self.load_restore_point_by_index(low_restore_point_idx)?; let high_restore_point = self.get_restore_point(high_restore_point_idx, &split)?; - // 2. Load the blocks from the high restore point back to the low restore point. + // 2. Load the blocks from the high restore point back to the low point. let blocks = self.load_blocks_to_replay( - low_restore_point.slot(), + low_slot, slot, self.get_high_restore_point_block_root(&high_restore_point, slot)?, )?; - // 3. Replay the blocks on top of the low restore point. + // 3. Replay the blocks on top of the low point. // Use a forwards state root iterator to avoid doing any tree hashing. // The state root of the high restore point should never be used, so is safely set to 0.
let state_root_iter = self.forwards_state_roots_iterator_until( - low_restore_point.slot(), + low_slot, slot, || (high_restore_point, Hash256::zero()), &self.spec, )?; - self.replay_blocks( - low_restore_point, + let state = self.replay_blocks( + low_state, blocks, slot, Some(state_root_iter), StateRootStrategy::Accurate, - ) + )?; + + // Replay succeeded; put the resulting state in the cache. + self.state_cache.lock().put(slot, state.clone()); + + Ok(state) } /// Get the restore point with the given index, or if it is out of bounds, the split state. diff --git a/beacon_node/store/src/metadata.rs b/beacon_node/store/src/metadata.rs index 8e9b3599b14..eca8fc834f0 100644 --- a/beacon_node/store/src/metadata.rs +++ b/beacon_node/store/src/metadata.rs @@ -99,8 +99,10 @@ pub struct AnchorInfo { impl AnchorInfo { /// Returns true if the block backfill has completed. - pub fn block_backfill_complete(&self) -> bool { - self.oldest_block_slot == 0 + /// This is a comparison between the oldest block slot and the target backfill slot (which is + /// likely to be the closest weak subjectivity point). + pub fn block_backfill_complete(&self, target_slot: Slot) -> bool { + self.oldest_block_slot <= target_slot } } diff --git a/book/src/LaTeX/full-withdrawal.tex b/book/src/LaTeX/full-withdrawal.tex new file mode 100644 index 00000000000..2447ba0974a --- /dev/null +++ b/book/src/LaTeX/full-withdrawal.tex @@ -0,0 +1,66 @@ +% To compile the file using PdfLaTeX, you may use the latex+dvips+ps2pdf compilation. If you are using TeXstudio, this is built in and you can choose this option by going to Options > Configure TeXstudio under Build & View, choose DVI -> PS -> PDF Chain + +% Alternatively, you may use XeLaTeX with the --shell-escape command. To do so in TeXstudio, go to Options > Configure TeXstudio > Build. Under "Add Commands", enter a user of your choice, and in the right empty space, insert: txs:///xelatex/[--shell-escape]. When compiling, go to Tools > User and select the user you just inserted. + +\documentclass[]{article} +\usepackage{pst-all} +\pagestyle{empty} + + + +\begin{document} + + +\begin{figure} + \psscalebox{1.0 1.0} % Change this value to rescale the drawing.
+ { + \begin{pspicture}(0,-9.09)(11.8,6.13) + \psframe[linecolor=black, linewidth=0.04, dimen=outer](7.3,6.13)(4.2,5.21) + \rput[bl](4.6,5.51){Voluntary exit} + \psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{-<}(5.8,5.21)(5.8,3.71)(5.8,3.81) + \psline[linecolor=black, linewidth=0.04](1.7,3.61)(9.8,3.61) + \psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(1.7,3.61)(1.7,2.61) + \psframe[linecolor=black, linewidth=0.04, dimen=outer](2.9,2.63)(0.8,1.55) + \rput[bl](1.0,1.91){Type 0x00} + \psframe[linecolor=black, linewidth=0.04, dimen=outer](10.7,2.63)(8.6,1.55) + \rput[bl](8.8,1.91){Type 0x01} + \psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(9.8,3.61)(9.8,2.61) + \psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(1.7,1.51)(1.7,0.61) + \psframe[linecolor=black, linewidth=0.04, dimen=outer](3.7,0.61)(0.0,-1.19) + \rput[bl](0.6,-0.19){Funds locked in} + \rput[bl](0.7,-0.79){Beacon chain} + \psframe[linecolor=black, linewidth=0.04, dimen=outer](11.8,0.73)(7.9,-1.39) + \rput[bl](9.0,-0.59){Exit queue} + \rput[bl](8.8,0.01){Varying time} + \rput[bl](8.3,-1.09){32 minutes to weeks} + \rput[bl](9.0,-2.89){Fixed time} + \rput[bl](9.0,-3.49){27.3 hours} + \rput[bl](8.8,-5.49){Varying time} + \rput[bl](8.7,-5.99){validator sweep} + \rput[bl](8.9,-6.59){up to 5 days} + \psframe[linecolor=black, linewidth=0.04, dimen=outer](11.6,-2.19)(8.0,-3.89) + \psframe[linecolor=black, linewidth=0.04, dimen=outer](11.7,-4.79)(7.9,-6.89) + \psframe[linecolor=black, linewidth=0.04, dimen=outer](3.7,-2.49)(0.0,-4.29) + \rput[bl](1.3,-3.29){BLS to} + \rput[bl](0.6,-3.89){execution change} + \psline[linecolor=black, linewidth=0.04, linestyle=dashed, dash=0.17638889cm 0.10583334cm, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(1.7,-1.19)(1.7,-2.49) + \psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(9.8,1.51)(9.8,0.71) + \psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(9.8,-1.39)(9.8,-2.19) + \psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(9.8,-3.89)(9.8,-4.79) + \psline[linecolor=black, linewidth=0.04, linestyle=dotted, dotsep=0.10583334cm](3.7,-3.39)(5.8,-3.39) + \psline[linecolor=black, linewidth=0.04, linestyle=dotted, dotsep=0.10583334cm, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(5.8,-3.39)(5.8,-0.39)(7.9,-0.39) + \psline[linecolor=black, linewidth=0.04, linestyle=dotted, dotsep=0.10583334cm, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(5.8,-3.39)(8.0,-3.39) + \psline[linecolor=black, linewidth=0.04, linestyle=dotted, dotsep=0.10583334cm, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(5.8,-3.39)(5.8,-6.09)(7.9,-6.09) + \psframe[linecolor=black, linewidth=0.04, dimen=outer](11.7,-7.79)(7.9,-9.09) + \psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(9.8,-6.89)(9.8,-7.79) + \rput[bl](8.1,-8.59){\Large{Full withdrawal}} + \rput[bl](1.8,-2.09){\textit{\Large{anytime}}} + \rput[bl](4.0,-3.19){\textit{\Large{either}}} + \rput[bl](4.2,-3.89){\textit{\Large{one}}} + \end{pspicture} + } +\end{figure} + + + +\end{document} diff --git a/book/src/LaTeX/partial-withdrawal.tex b/book/src/LaTeX/partial-withdrawal.tex new file mode 
100644 index 00000000000..05db3b68889 --- /dev/null +++ b/book/src/LaTeX/partial-withdrawal.tex @@ -0,0 +1,50 @@ +% To compile the file using PdfLaTeX, you may use the latex+dvips+ps2pdf compilation. If you are using TeXstudio, this is built in and you can choose this option by going to Options > Configure TeXstudio under Build & View, choose DVI -> PS -> PDF Chain + +% Alternatively, you may use XeLaTeX with the --shell-escape command. To do so in TeXstudio, go to Options > Configure TeXstudio > Build. Under "Add Commands", enter a user of your choice, and in the right empty space, insert: txs:///xelatex/[--shell-escape]. When compiling, go to Tools > User and select the user you just inserted. + + +\documentclass[]{article} +\usepackage{pst-all} +\pagestyle{empty} + + + +\begin{document} + +\begin{figure} + \psscalebox{1.0 1.0} % Change this value to rescale the drawing. + { + \begin{pspicture}(0,-8.09)(10.7,5.53) + \psframe[linecolor=black, linewidth=0.04, dimen=outer](7.14,5.53)(3.6,4.45) + \rput[bl](3.8,4.81){Partial withdrawals} + \psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{-<}(5.2,4.41)(5.2,2.91)(5.2,3.01) + \psline[linecolor=black, linewidth=0.04](1.8,2.81)(8.9,2.81) + \psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(1.8,2.81)(1.8,1.81) + \psframe[linecolor=black, linewidth=0.04, dimen=outer](2.7,1.83)(0.6,0.75) + \rput[bl](0.8,1.09){Type 0x00} + \psframe[linecolor=black, linewidth=0.04, dimen=outer](9.8,1.83)(7.7,0.75) + \rput[bl](7.92,1.07){Type 0x01} + \psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(8.9,2.81)(8.9,1.81) + \psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(1.7,0.71)(1.7,-0.19) + \psframe[linecolor=black, linewidth=0.04, dimen=outer](3.7,-0.19)(0.0,-1.99) + \rput[bl](0.66,-0.99){Funds locked in} + \rput[bl](0.9,-1.59){Beacon chain} + \psframe[linecolor=black, linewidth=0.04, dimen=outer](10.7,-3.29)(6.8,-5.09) + \rput[bl](7.6,-3.99){validator sweep} + \rput[bl](7.5,-4.69){$\sim$ every 5 days} + \psframe[linecolor=black, linewidth=0.04, dimen=outer](3.7,-3.29)(0.0,-5.09) + \rput[bl](1.3,-4.09){BLS to} + \rput[bl](0.5,-4.69){execution change} + \psline[linecolor=black, linewidth=0.04, linestyle=dashed, dash=0.17638889cm 0.10583334cm, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(1.7,-1.99)(1.7,-3.29) + \psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(8.9,0.71)(8.9,-3.29) + \psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(3.7,-4.19)(6.7,-4.19) + \psframe[linecolor=black, linewidth=0.04, dimen=outer](10.7,-6.29)(6.9,-8.09) + \rput[bl](7.0,-6.99){Balance above 32 ETH} + \rput[bl](7.9,-7.59){withdrawn} + \psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(8.9,-5.09)(8.9,-6.29) + \rput[bl](1.8,-2.89){\textit{\Large{anytime}}} + \end{pspicture} + } +\end{figure} + +\end{document} \ No newline at end of file diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index ba234632d72..bfd5a02a6f2 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -11,15 +11,10 @@ * [Update Priorities](./installation-priorities.md) * [Run a Node](./run_a_node.md) * [Become a Validator](./mainnet-validator.md) - * [Become a Testnet Validator](./testnet-validator.md) -* [Key
Management](./key-management.md) - * [Create a wallet](./wallet-create.md) - * [Create a validator](./validator-create.md) - * [Key recovery](./key-recovery.md) * [Validator Management](./validator-management.md) - * [Importing from the Staking Launchpad](./validator-import-launchpad.md) * [Slashing Protection](./slashing-protection.md) * [Voluntary Exits](./voluntary-exit.md) + * [Partial Withdrawals](./partial-withdrawal.md) * [Validator Monitoring](./validator-monitoring.md) * [Doppelganger Protection](./validator-doppelganger.md) * [Suggested Fee Recipient](./suggested-fee-recipient.md) @@ -46,6 +41,8 @@ * [Remote Signing with Web3Signer](./validator-web3signer.md) * [Database Configuration](./advanced_database.md) * [Database Migrations](./database-migrations.md) + * [Key Management](./key-management.md) + * [Key Recovery](./key-recovery.md) * [Advanced Networking](./advanced_networking.md) * [Running a Slasher](./slasher.md) * [Redundancy](./redundancy.md) diff --git a/book/src/advanced_database.md b/book/src/advanced_database.md index 397d9a28b5f..57e49531ca0 100644 --- a/book/src/advanced_database.md +++ b/book/src/advanced_database.md @@ -58,6 +58,16 @@ the `--slots-per-restore-point` flag: lighthouse beacon_node --slots-per-restore-point 32 ``` +### Historic state cache + +Lighthouse includes a cache to avoid repeatedly replaying blocks when loading historic states. Lighthouse will cache a limited number of reconstructed states and will re-use them when serving requests for subsequent states at higher slots. This greatly reduces the cost of requesting several states in order, and we recommend that applications like block explorers take advantage of this cache. + +The historical state cache size can be specified with the flag `--historic-state-cache-size` (default value is 1): + +```bash +lighthouse beacon_node --historic-state-cache-size 4 +``` + ## Glossary * _Freezer DB_: part of the database storing finalized states. States are stored in a sparser diff --git a/book/src/api-bn.md b/book/src/api-bn.md index 481c0016942..b86e593bf1c 100644 --- a/book/src/api-bn.md +++ b/book/src/api-bn.md @@ -72,8 +72,7 @@ specification][OpenAPI]. Returns the block header at the head of the canonical chain. ```bash -curl -X GET "http://localhost:5052/eth/v1/beacon/headers/head" -H "accept: -application/json" +curl -X GET "http://localhost:5052/eth/v1/beacon/headers/head" -H "accept: application/json" | jq ``` ```json @@ -100,7 +99,7 @@ application/json" Shows the status of validator at index `1` at the `head` state. ```bash -curl -X GET "http://localhost:5052/eth/v1/beacon/states/head/validators/1" -H "accept: application/json" +curl -X GET "http://localhost:5052/eth/v1/beacon/states/head/validators/1" -H "accept: application/json" | jq ``` ```json @@ -159,8 +158,7 @@ The API is now being served at `https://localhost:5052`. To test connectivity, you can run the following: ```bash -curl -X GET "https://localhost:5052/eth/v1/node/version" -H "accept: application/json" --cacert cert.pem - +curl -X GET "https://localhost:5052/eth/v1/node/version" -H "accept: application/json" --cacert cert.pem | jq ``` ### Connecting a validator client In order to connect a validator client to a beacon node over TLS, the validator @@ -203,7 +201,7 @@ Ensure the `--http` flag has been supplied at the CLI. 
You can quickly check that the HTTP endpoint is up using `curl`: ```bash curl -X GET "http://localhost:5052/eth/v1/node/version" -H "accept: application/json" | jq ``` The beacon node should respond with its version: diff --git a/book/src/api-lighthouse.md b/book/src/api-lighthouse.md index 5cd7929a3cd..de88c878f6a 100644 --- a/book/src/api-lighthouse.md +++ b/book/src/api-lighthouse.md @@ -456,6 +456,7 @@ curl "http://localhost:5052/lighthouse/database/info" | jq "config": { "slots_per_restore_point": 2048, "block_cache_size": 5, + "historic_state_cache_size": 1, "compact_on_init": false, "compact_on_prune": true }, diff --git a/book/src/database-migrations.md b/book/src/database-migrations.md index d2b7b518d75..5e0b8963590 100644 --- a/book/src/database-migrations.md +++ b/book/src/database-migrations.md @@ -92,6 +92,7 @@ curl "http://localhost:5052/lighthouse/database/info" "slots_per_restore_point": 8192, "slots_per_restore_point_set_explicitly": true, "block_cache_size": 5, + "historic_state_cache_size": 1, "compact_on_init": false, "compact_on_prune": true } diff --git a/book/src/imgs/full-withdrawal.png b/book/src/imgs/full-withdrawal.png new file mode 100644 index 00000000000..6fa2db6a913 Binary files /dev/null and b/book/src/imgs/full-withdrawal.png differ diff --git a/book/src/imgs/partial-withdrawal.png b/book/src/imgs/partial-withdrawal.png new file mode 100644 index 00000000000..0bf90b91db0 Binary files /dev/null and b/book/src/imgs/partial-withdrawal.png differ diff --git a/book/src/installation.md b/book/src/installation.md index 627326d2a4a..4adaf8da76e 100644 --- a/book/src/installation.md +++ b/book/src/installation.md @@ -29,6 +29,10 @@ After [The Merge](https://ethereum.org/en/roadmap/merge/) on 15th Sep * CPU: Quad-core AMD Ryzen, Intel Broadwell, ARMv8 or newer -* Memory: 16 GB RAM or more -* Storage: 2 TB solid state storage +* Memory: 32 GB RAM* +* Storage: 2 TB solid state drive * Network: 100 Mb/s download, 20 Mb/s upload broadband connection + +> *Note: 16 GB RAM is becoming rather limited due to the increased resources required. 16 GB RAM would likely result in out of memory errors in the case of a spike in computing demand (e.g., caused by a bug) or during periods of non-finality of the beacon chain. Users with 16 GB RAM also have a limited choice when it comes to selecting an execution client, which does not help with the [client diversity](https://clientdiversity.org/). We therefore recommend at least 32 GB RAM for the long-term health of the node; this also gives users the flexibility to change execution clients should the need arise. + +Last update: April 2023 diff --git a/book/src/key-management.md b/book/src/key-management.md index bb1751be166..084b1fbe4cb 100644 --- a/book/src/key-management.md +++ b/book/src/key-management.md @@ -3,12 +3,12 @@ [launchpad]: https://launchpad.ethereum.org/ > -> **Note: we recommend using the [Staking launchpad][launchpad] to create validators.** +> **Note: While Lighthouse is able to generate the validator keys and the deposit data file to submit to the deposit contract, we strongly recommend using the [staking-deposit-cli](https://github.com/ethereum/staking-deposit-cli) to create validator keys and the deposit data file.
This is because the [staking-deposit-cli](https://github.com/ethereum/staking-deposit-cli) has the option to assign a withdrawal address during the key generation process, while the Lighthouse wallet will always generate keys with withdrawal credentials of type 0x00. This means that users who created keys using Lighthouse will have to update their withdrawal credentials in the future to enable withdrawals. In addition, Lighthouse generates the deposit data file in the form of `*.rlp`, which cannot be uploaded to the [Staking launchpad][launchpad], which accepts only `*.json` files. This means that users who generate the files using Lighthouse have to interact directly with the deposit contract to submit the deposit.** Lighthouse uses a _hierarchical_ key management system for producing validator keys. It is hierarchical because each validator key can be _derived_ from a master key, making the validator keys _children_ of the master key. This -scheme means that a single 24-word mnemonic can be used to backup all of your +scheme means that a single 24-word mnemonic can be used to back up all of your validator keys without providing any observable link between them (i.e., it is privacy-retaining). Hierarchical key derivation schemes are common-place in cryptocurrencies, they are already used by most hardware and software wallets @@ -30,37 +30,63 @@ We defined some terms in the context of validator key management: keypair. - Defined in EIP-2335. - **Voting Keypair**: a BLS public and private keypair which is used for - signing blocks, attestations and other messages on regular intervals, - whilst staking in Phase 0. + signing blocks, attestations and other messages on regular intervals in the beacon chain. - **Withdrawal Keypair**: a BLS public and private keypair which will be required _after_ Phase 0 to manage ETH once a validator has exited. -## Overview +## Create a validator +There are 2 steps involved in creating a validator key using Lighthouse: + 1. [Create a wallet](#step-1-create-a-wallet-and-record-the-mnemonic) + 1. [Create a validator](#step-2-create-a-validator) -The key management system in Lighthouse involves moving down the above list of -items, starting at one easy-to-backup mnemonic and ending with multiple -keypairs. Creating a single validator looks like this: +The following example demonstrates how to create a single validator key. -1. Create a **wallet** and record the **mnemonic**: - - `lighthouse --network prater account wallet create --name wally --password-file wally.pass` -1. Create the voting and withdrawal **keystores** for one validator: - - `lighthouse --network prater account validator create --wallet-name wally --wallet-password wally.pass --count 1` +### Step 1: Create a wallet and record the mnemonic +A wallet allows for generating practically unlimited validators from an +easy-to-remember 24-word string (a mnemonic). As long as that mnemonic is +backed up, all validator keys can be trivially re-generated. +Whilst the wallet stores the mnemonic, it does not store it in plain-text: the +mnemonic is encrypted with a password. It is the responsibility of the user to +define a strong password. The password is only required for interacting with +the wallet; it is not required for recovering keys from a mnemonic. -In step (1), we created a wallet in `~/.lighthouse/{network}/wallets` with the name -`wally`. We encrypted this using a pre-defined password in the -`wally.pass` file.
Then, in step (2), we created one new validator in the -`~/.lighthouse/{network}/validators` directory using `wally` (unlocking it with -`wally.pass`) and storing the passwords to the validators voting key in -`~/.lighthouse/{network}/secrets`. +To create a wallet, use the `lighthouse account wallet` command. For example, if we wish to create a new wallet for the Goerli testnet named `wally` and save it in `~/.lighthouse/goerli/wallets` with a randomly generated password saved +to `./wally.pass`: -Thanks to the hierarchical key derivation scheme, we can delete all of the -aforementioned directories and then regenerate them as long as we remembered -the 24-word mnemonic (we don't recommend doing this, though). +```bash +lighthouse --network goerli account wallet create --name wally --password-file wally.pass +``` +Using the above command, a wallet will be created in `~/.lighthouse/goerli/wallets` with the name +`wally`. It is encrypted using the password defined in the +`wally.pass` file. -Creating another validator is easy, it's just a matter of repeating step (2). -The wallet keeps track of how many validators it has generated and ensures that -a new validator is generated each time. +During the wallet creation process, a 24-word mnemonic will be displayed. Record the mnemonic because it allows you to recreate the files in the case of data loss. +> Notes: +> - When navigating to the directory `~/.lighthouse/goerli/wallets`, one will not see the wallet name `wally`, but a hexadecimal folder containing the wallet file. However, when interacting with `lighthouse` in the CLI, the name `wally` will be used. +> - The password is not `wally.pass`; it is the _content_ of the +> `wally.pass` file. +> - If `wally.pass` already exists, the wallet password will be set to the content +> of that file. + +### Step 2: Create a validator +Validators are fundamentally represented by a BLS keypair. In Lighthouse, we use a wallet to generate these keypairs. Once a wallet exists, the `lighthouse account validator create` command can be used to generate the BLS keypair and all necessary information to submit a validator deposit. With the `wally` wallet created in [Step 1](#step-1-create-a-wallet-and-record-the-mnemonic), we can create a validator with the command: + +```bash +lighthouse --network goerli account validator create --wallet-name wally --wallet-password wally.pass --count 1 +``` +This command will: + +- Derive a single new BLS keypair from wallet `wally` in `~/.lighthouse/goerli/wallets`, updating it so that it generates a new key next time. +- Create a new directory `~/.lighthouse/goerli/validators` containing: - An encrypted keystore file `voting-keystore.json` containing the validator's voting keypair. - An `eth1_deposit_data.rlp` assuming the default deposit amount (`32 ETH`) which can be submitted to the deposit contract for the Goerli testnet. Other networks can be set via the `--network` parameter. - Create a new directory `~/.lighthouse/goerli/secrets` which stores a password to the validator's voting keypair. + + +If you want to create another validator in the future, repeat [Step 2](#step-2-create-a-validator). The wallet keeps track of how many validators it has generated and ensures that a new validator is generated each time. The important thing is to keep the 24-word mnemonic safe so that it can be used to generate new validator keys if needed.
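As a quick sanity check, you can list what was created (a sketch assuming the default directories used above; entries under both paths are named after each validator's 0x-prefixed voting public key):

```bash
# One sub-directory per validator, holding voting-keystore.json and
# eth1_deposit_data.rlp.
ls ~/.lighthouse/goerli/validators

# The corresponding voting-keystore password files.
ls ~/.lighthouse/goerli/secrets
```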
## Detail @@ -76,36 +102,17 @@ There are three important directories in Lighthouse validator key management: - Defaults to `~/.lighthouse/{network}/validators` - `secrets/`: since the validator signing keys are "hot", the validator process needs access to the passwords to decrypt the keystores in the validators - dir. These passwords are stored here. - - Defaults to `~/.lighthouse/{network}/secrets` where `network` is the name of the network passed in the `--network` parameter (default is `mainnet`). + directory. These passwords are stored here. + - Defaults to `~/.lighthouse/{network}/secrets` + +where `{network}` is the name of the network passed in the `--network` parameter. When the validator client boots, it searches the `validators/` for directories containing voting keystores. When it discovers a keystore, it searches the -`secrets/` dir for a file with the same name as the 0x-prefixed hex -representation of the keystore public key. If it finds this file, it attempts +`secrets/` directory for a file with the same name as the 0x-prefixed validator public key. If it finds this file, it attempts to decrypt the keystore using the contents of this file as the password. If it fails, it logs an error and moves onto the next keystore. The `validators/` and `secrets/` directories are kept separate to allow for ease-of-backup; you can safely backup `validators/` without worrying about leaking private key data. - -### Withdrawal Keypairs - -In Ethereum consensus Phase 0, withdrawal keypairs do not serve any immediate purpose. -However, they become very important _after_ Phase 0: they will provide the -ultimate control of the ETH of withdrawn validators. - -This presents an interesting key management scenario: withdrawal keys are very -important, but not right now. Considering this, Lighthouse has adopted a -strategy where **we do not save withdrawal keypairs to disk by default** (it is -opt-in). Instead, we assert that since the withdrawal keys can be regenerated -from a mnemonic, having them lying around on the file-system only presents risk -and complexity. - -At the time of writing, we do not expose the commands to regenerate keys from -mnemonics. However, key regeneration is tested on the public Lighthouse -repository and will be exposed prior to mainnet launch. - -So, in summary, withdrawal keypairs can be trivially regenerated from the -mnemonic via EIP-2333 so they are not saved to disk like the voting keypairs. diff --git a/book/src/key-recovery.md b/book/src/key-recovery.md index 2474d123caf..a996e95cbc5 100644 --- a/book/src/key-recovery.md +++ b/book/src/key-recovery.md @@ -1,8 +1,8 @@ -# Key recovery +# Key Recovery Generally, validator keystore files are generated alongside a *mnemonic*. If -the keystore and/or the keystore password are lost this mnemonic can +the keystore and/or the keystore password are lost, this mnemonic can regenerate a new, equivalent keystore with a new password. There are two ways to recover keys using the `lighthouse` CLI: @@ -48,7 +48,7 @@ which contains all the information necessary to run a validator using the `lighthouse vc` command. The password to this new keystore will be placed in the `--secrets-dir` (default `~/.lighthouse/{network}/secrets`). -where `network` is the name of the consensus layer network passed in the `--network` parameter (default is `mainnet`). +where `{network}` is the name of the consensus layer network passed in the `--network` parameter (default is `mainnet`). 
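+As an illustrative sketch, recovering the first two voting keystores directly from a mnemonic could look like the following (the `--count` and `--first-index` flags are assumptions here; consult `lighthouse account validator recover --help` for the authoritative options):
+
+```bash
+# Re-derive keystores for validator indices 0 and 1 from the mnemonic
+# (entered interactively) into the default validators directory.
+lighthouse --network mainnet account validator recover --count 2 --first-index 0
+```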
## Recover a EIP-2386 wallet
diff --git a/book/src/mainnet-validator.md b/book/src/mainnet-validator.md
index 41735f85bb5..d61d460e3cc 100644
--- a/book/src/mainnet-validator.md
+++ b/book/src/mainnet-validator.md
@@ -12,25 +12,13 @@ Becoming an Ethereum consensus validator is rewarding, but it's not for the fain
 familiar with the rules of staking (e.g., rewards, penalties, etc.) and also configuring and managing servers.
 You'll also need at least 32 ETH!

-For those with an understanding of Ethereum consensus and server maintenance, you'll find that running Lighthouse
-is easy. Install it, start it, monitor it and keep it updated. You shouldn't need to interact
-with it on a day-to-day basis.
+Being educated is critical to a validator's success. Before submitting your mainnet deposit, we recommend:

-Being educated is critical to validator success. Before submitting your mainnet deposit, we
-recommend:
-
-- Thoroughly exploring the [Staking Launchpad][launchpad] website
-  - Try running through the deposit process *without* actually submitting a deposit.
-- Reading through this documentation, especially the [Slashing Protection][slashing] section.
+- Thoroughly exploring the [Staking Launchpad][launchpad] website and trying a run-through of the deposit process using a testnet launchpad such as the [Goerli staking launchpad](https://goerli.launchpad.ethereum.org/en/).
 - Running a [testnet validator][testnet-validator].
+- Reading through this documentation, especially the [Slashing Protection][slashing] section.
 - Performing a web search and doing your own research.

-By far, the best technical learning experience is to run a [Testnet Validator][testnet-validator].
-You can get hands-on experience with all the tools and it's a great way to test your staking
-hardware. We recommend *all* mainnet validators to run a testnet validator initially; 32 ETH is a
-significant outlay and joining a testnet is a great way to "try before you buy".
-
-Remember, if you get stuck you can always reach out on our [Discord][discord].
 >
 > **Please note**: the Lighthouse team does not take any responsibility for losses or damages
@@ -40,116 +28,187 @@ Remember, if you get stuck you can always reach out on our [Discord][discord].
 > due to the actions of other actors on the consensus layer or software bugs. See the
 > [software license][license] for more detail on liability.

-## Using Lighthouse for Mainnet
-When using Lighthouse, the `--network` flag selects a network. E.g.,
+## Become a validator

-- `lighthouse` (no flag): Mainnet.
-- `lighthouse --network mainnet`: Mainnet.
-- `lighthouse --network prater`: Prater (testnet).
+There are five primary steps to becoming a validator:

-Using the correct `--network` flag is very important; using the wrong flag can
-result in penalties, slashings or lost deposits. As a rule of thumb, always
-provide a `--network` flag instead of relying on the default.
+1. [Create validator keys](#step-1-create-validator-keys)
+1. [Start an execution client and Lighthouse beacon node](#step-2-start-an-execution-client-and-lighthouse-beacon-node)
+1. [Import validator keys into Lighthouse](#step-3-import-validator-keys-to-lighthouse)
+1. [Start Lighthouse validator client](#step-4-start-lighthouse-validator-client)
+1. [Submit deposit](#step-5-submit-deposit-32eth-per-validator)

-## Joining a Testnet
+> **Important note**: The guide below contains both mainnet and testnet instructions. We highly recommend that *all* users **run a testnet validator** prior to staking mainnet ETH.
+By far, the best technical learning experience is to run a testnet validator. You can get hands-on experience with all the tools and it's a great way to test your staking
+hardware. 32 ETH is a significant outlay and joining a testnet is a great way to "try before you buy".

-There are five primary steps to become a testnet validator:
+

-1. Create validator keys and submit deposits.
-1. Start an execution client.
-1. Install Lighthouse.
-1. Import the validator keys into Lighthouse.
-1. Start Lighthouse.
-1. Leave Lighthouse running.

-Each of these primary steps has several intermediate steps, so we recommend
-setting aside one or two hours for this process.
+> **Never use real ETH to join a testnet!** Testnets such as Goerli use Goerli ETH, which is worthless. This allows experimentation without real-world costs.

 ### Step 1. Create validator keys

-The Ethereum Foundation provides a "Staking Launchpad" for creating validator keypairs and submitting
-deposits:
-
-- [Staking Launchpad][launchpad]
+The Ethereum Foundation provides the [staking-deposit-cli](https://github.com/ethereum/staking-deposit-cli/releases) for creating validator keys. Download and run the `staking-deposit-cli` with the command:
+```bash
+./deposit new-mnemonic
+```
+and follow the instructions to generate the keys. When prompted for a network, select `mainnet` if you want to run a mainnet validator, or select `goerli` if you want to run a Goerli testnet validator. A new mnemonic will be generated in the process.

-Please follow the steps on the launch pad site to generate validator keys and submit deposits. Make
-sure you select "Lighthouse" as your client.
+> **Important note:** A mnemonic (or seed phrase) is a 24-word string randomly generated in the process. It is highly recommended to write down the mnemonic and keep it safe offline. It is important to ensure that the mnemonic is never stored in any digital form (computers, mobile phones, etc.) connected to the internet. Please also make one or more backups of the mnemonic to ensure your ETH is not lost in the case of data loss. It is very important to keep your mnemonic private as it represents the ultimate control of your ETH.

-Move to the next step once you have completed the steps on the launch pad,
-including generating keys via the Python CLI and submitting gETH/ETH deposits.
+Upon completing this step, the files `deposit_data-*.json` and `keystore-m_*.json` will be created. The keys that are generated from the staking-deposit-cli can be easily loaded into a Lighthouse validator client (`lighthouse vc`) in [Step 3](#step-3-import-validator-keys-to-lighthouse). In fact, both of these programs are designed to work with each other.

-### Step 2. Start an execution client
-Since the consensus chain relies upon the execution chain for validator on-boarding, all consensus validators must have a
-connection to an execution client.
+> Lighthouse also supports creating validator keys; see [Key management](./key-management.md) for more info.

-We provide instructions for using Geth, but you could use any client that implements the JSON RPC
-via HTTP. A fast-synced node is sufficient.
+### Step 2. Start an execution client and Lighthouse beacon node

-#### Installing Geth
+Start an execution client and Lighthouse beacon node according to the [Run a Node](./run_a_node.md) guide. Make sure that both the execution client and the consensus client are synced.
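+One way to check that the beacon node is synced is to query the standard [Beacon Node API](https://ethereum.github.io/beacon-APIs/) syncing endpoint (a sketch, assuming the beacon node is running with the HTTP API enabled on the default `localhost:5052`):
+
+```bash
+# "is_syncing": false in the response indicates that the beacon node is synced.
+curl -s http://localhost:5052/eth/v1/node/syncing
+```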
-If you're using a Mac, follow the instructions [listed
-here](https://github.com/ethereum/go-ethereum/wiki/Installation-Instructions-for-Mac) to install
-geth. Otherwise [see here](https://github.com/ethereum/go-ethereum/wiki/Installing-Geth).
+### Step 3. Import validator keys to Lighthouse

-#### Starting Geth
+In [Step 1](#step-1-create-validator-keys), the staking-deposit-cli generated the validator keys in a `validator_keys` directory. Let's assume that
+this directory is `$HOME/staking-deposit-cli/validator_keys`. Using the default `validators` directory in Lighthouse (`~/.lighthouse/mainnet/validators`), run the following command to import validator keys:

-Once you have geth installed, use this command to start your execution node:
+Mainnet:
+```bash
+lighthouse --network mainnet account validator import --directory $HOME/staking-deposit-cli/validator_keys
+```
+Goerli testnet:
 ```bash
- geth --http
+lighthouse --network goerli account validator import --directory $HOME/staking-deposit-cli/validator_keys
+```
+
+> Note: The user must specify the consensus client network that they are importing the keys for by using the `--network` flag.
+
+> Note: If the `validator_keys` directory is in a different location, modify the path accordingly.
+
+> Note: `~/.lighthouse/mainnet` is the default directory which contains the keys and database. To specify a custom directory, see [Custom Directories][advanced-datadir].
+
+> Docker users should use the command from the [Docker](#docker-users) documentation.
+
+
+The user will be prompted for a password for each keystore discovered:
+
+```
+Keystore found at "/home/{username}/staking-deposit-cli/validator_keys/keystore-m_12381_3600_0_0_0-1595406747.json":
-### Step 3. Install Lighthouse
+
+ - Public key: 0xa5e8702533f6d66422e042a0bf3471ab9b302ce115633fa6fdc5643f804b6b4f1c33baf95f125ec21969a3b1e0dd9e56
+ - UUID: 8ea4cf99-8719-43c5-9eda-e97b8a4e074f
-*Note: Lighthouse only supports Windows via WSL.*
+If you enter the password it will be stored as plain text in validator_definitions.yml so that it is not required each time the validator client starts.
-Follow the [Lighthouse Installation Instructions](./installation.md) to install
-Lighthouse from one of the available options.
+Enter the keystore password, or press enter to omit it:
+```
-Proceed to the next step once you've successfully installed Lighthouse and viewed
-its `--version` info.
+The user can choose whether or not they'd like to store the validator password
+in the [`validator_definitions.yml`](./validator-management.md) file. If the
+password is *not* stored here, the validator client (`lighthouse vc`)
+application will ask for the password each time it starts. This might be nice
+for some users from a security perspective (i.e., if it is a shared computer),
+however it means that if the validator client restarts, the user will be subject
+to offline penalties until they can enter the password. If the user trusts the
+computer that is running the validator client and they are seeking maximum
+validator rewards, we recommend entering a password at this point.
-> Note: Some of the instructions vary when using Docker, ensure you follow the
-> appropriate sections later in this guide.
+Once the process is done, the user will see:
-### Step 4. Import validator keys to Lighthouse
+
+```
+Successfully imported keystore.
+Successfully updated validator_definitions.yml.
-When Lighthouse is installed, follow the [Importing from the Ethereum Staking Launch
-pad](./validator-import-launchpad.md) instructions so the validator client can
-perform your validator duties.
+Successfully imported 1 validators (0 skipped).
-Proceed to the next step once you've successfully imported all validators.
+WARNING: DO NOT USE THE ORIGINAL KEYSTORES TO VALIDATE WITH ANOTHER CLIENT, OR YOU WILL GET SLASHED.
+```
-### Step 5. Start Lighthouse
+Once you see the above message, you have successfully imported the validator keys. You can now proceed to the next step to start the validator client.
-For staking, one needs to run two Lighthouse processes:
-- `lighthouse bn`: the "beacon node" which connects to the P2P network and
-  verifies blocks.
-- `lighthouse vc`: the "validator client" which manages validators, using data
-  obtained from the beacon node via a HTTP API.
+### Step 4. Start Lighthouse validator client
-Starting these processes is different for binary and docker users:
+After the keys are imported, the user can start performing their validator duties
+by starting the Lighthouse validator client `lighthouse vc`:
-#### Binary users
+Mainnet:
-Those using the pre- or custom-built binaries can start the two processes with:
+```bash
+lighthouse vc --network mainnet --suggested-fee-recipient YourFeeRecipientAddress
+```
+Goerli testnet:
 ```bash
-lighthouse --network mainnet bn --staking
+lighthouse vc --network goerli --suggested-fee-recipient YourFeeRecipientAddress
+```
+
+The validator client manages validators using data obtained from the beacon node via an HTTP API. We highly recommend setting a fee recipient by changing `YourFeeRecipientAddress` to an Ethereum address under your control.
+
+When `lighthouse vc` starts, check that the validator public key appears
+as a `voting_pubkey` as shown below:
+
+```
+INFO Enabled validator voting_pubkey: 0xa5e8702533f6d66422e042a0bf3471ab9b302ce115633fa6fdc5643f804b6b4f1c33baf95f125ec21969a3b1e0dd9e56
+```
+
+Once this log appears (and there are no errors), the `lighthouse vc` application
+will ensure that the validator starts performing its duties and being rewarded
+by the protocol.
+
+### Step 5: Submit deposit (32ETH per validator)
+
+After you have successfully run and synced the execution client, beacon node and validator client, you can proceed to submit the deposit. Go to the mainnet [Staking launchpad](https://launchpad.ethereum.org/en/) (or [Goerli staking launchpad](https://goerli.launchpad.ethereum.org/en/) for a testnet validator) and carefully go through the steps to becoming a validator. Once you are ready, you can submit the deposit by sending 32 ETH per validator to the deposit contract. Upload the `deposit_data-*.json` file generated in [Step 1](#step-1-create-validator-keys) to the Staking launchpad.
+
+> **Important note:** Double check that the deposit contract for mainnet is `0x00000000219ab540356cBB839Cbe05303d7705Fa` before you confirm the transaction.
+
+Once the deposit transaction is confirmed, it will take a minimum of ~16 hours to a few days/weeks for the beacon chain to process and activate your validator, depending on the queue. Refer to our [FAQ - Why does it take so long for a validator to be activated](./faq.md#why-does-it-take-so-long-for-a-validator-to-be-activated) for more info.
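+While waiting, you can query the activation status of your validator from the beacon node (a sketch, assuming the beacon node is running with the HTTP API on the default port; substitute your own `voting_pubkey`):
+
+```bash
+# The "status" field changes from pending_queued to active_ongoing
+# once the validator is activated.
+curl -s http://localhost:5052/eth/v1/beacon/states/head/validators/0xa5e8702533f6d66422e042a0bf3471ab9b302ce115633fa6fdc5643f804b6b4f1c33baf95f125ec21969a3b1e0dd9e56
+```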
+
+Once your validator is activated, the validator client will start to publish attestations each epoch:
+
+```
+Dec 03 08:49:40.053 INFO Successfully published attestation slot: 98, committee_index: 0, head_block: 0xa208…7fd5,
+```
+
+If you propose a block, the log will look like:
+
+```
+Dec 03 08:49:36.225 INFO Successfully published block slot: 98, attestations: 2, deposits: 0, service: block
+```
+
+Congratulations! Your validator is now performing its duties and you will receive rewards for securing the Ethereum network.
+
+### What is next?
+After the validator is running and performing its duties, it is important to keep the validator online to continue accumulating rewards. However, there could be problems with the computer, the internet or other factors that cause the validator to be offline. For this, it is best to subscribe to notifications, e.g., via [beaconcha.in](https://beaconcha.in/), which will send notifications about missed attestations and/or proposals. You will be notified about the validator's offline status and will be able to react promptly.
+
+The next important thing is to stay up to date with updates to Lighthouse and the execution client. Updates are released from time to time, typically once or twice a month. For Lighthouse updates, you can subscribe to notifications on [GitHub](https://github.com/sigp/lighthouse) by clicking on `Watch`. If you only want to receive notifications for new releases, select `Custom`, then `Releases`. You could also join the [Lighthouse Discord](https://discord.gg/cyAszAh) where we will make an announcement when there is a new release.
+
+You may also want to try out [Siren](./lighthouse-ui.md), a UI developed by Lighthouse to monitor validator performance.
+
+Once you are familiar with running a validator and server maintenance, you'll find that running Lighthouse is easy. Install it, start it, monitor it and keep it updated. You shouldn't need to interact with it on a day-to-day basis. Happy staking!
+
+## Docker users
+
+### Import validator keys
+
+The `import` command is a little more complex for Docker users, but the example
+in this document can be substituted with:
 ```bash
-lighthouse --network mainnet vc
+docker run -it \
+  -v $HOME/.lighthouse:/root/.lighthouse \
+  -v $(pwd)/validator_keys:/root/validator_keys \
+  sigp/lighthouse \
+  lighthouse --network mainnet account validator import --directory /root/validator_keys
 ```
-> Note: `~/.lighthouse/mainnet` is the default directory which contains the keys and databases.
-> To specify a custom dir, see [Custom Directories][advanced-datadir].
+Here we use two `-v` volumes to attach:
-#### Docker users
+- `~/.lighthouse` on the host to `/root/.lighthouse` in the Docker container.
+- The `validator_keys` directory in the present working directory of the host
+  to the `/root/validator_keys` directory of the Docker container.
+### Start Lighthouse beacon node and validator client
 Those using Docker images can start the processes with:
 ```bash
@@ -167,29 +226,8 @@ $ docker run \
 lighthouse --network mainnet vc
 ```

-### Step 6. Leave Lighthouse running
-
-Leave your beacon node and validator client running and you'll see logs as the
-beacon node stays synced with the network while the validator client produces
-blocks and attestations.
-
-It will take 4-8+ hours for the beacon chain to process and activate your
-validator, however you'll know you're active when the validator client starts
-successfully publishing attestations each epoch:
-
-```
-Dec 03 08:49:40.053 INFO Successfully published attestation slot: 98, committee_index: 0, head_block: 0xa208…7fd5,
-```
-
-Although you'll produce an attestation each epoch, it's less common to produce a
-block. Watch for the block production logs too:
-
-```
-Dec 03 08:49:36.225 INFO Successfully published block slot: 98, attestations: 2, deposits: 0, service: block
-```
-If you see any `ERRO` (error) logs, please reach out on
-[Discord](https://discord.gg/cyAszAh) or [create an
+If you get stuck you can always reach out on our [Discord][discord] or [create an
 issue](https://github.com/sigp/lighthouse/issues/new).
-Happy staking!
+
diff --git a/book/src/partial-withdrawal.md b/book/src/partial-withdrawal.md
new file mode 100644
index 00000000000..db722d729e2
--- /dev/null
+++ b/book/src/partial-withdrawal.md
@@ -0,0 +1,23 @@
+# Partial Withdrawals
+
+After the [Capella](https://ethereum.org/en/history/#capella) upgrade on 12th April 2023:
+
+ - if a validator has a withdrawal credential of type `0x00`, the rewards will continue to accumulate and will be locked in the beacon chain.
+ - if a validator has a withdrawal credential of type `0x01`, any rewards above 32 ETH will be periodically withdrawn to the withdrawal address. This is also known as the "validator sweep", i.e., once the "validator sweep" reaches your validator's index, your rewards will be withdrawn to the withdrawal address. At the time of writing, with 560,000+ validators on the Ethereum mainnet, you can expect to receive the rewards approximately every 5 days.
+
+### FAQ
+1. How to know if I have the withdrawal credentials type `0x00` or `0x01`?
+
+   Refer [here](./voluntary-exit.md#1-how-to-know-if-i-have-the-withdrawal-credentials-type-0x01).
+
+2. My validator has withdrawal credentials type `0x00`, is there a deadline to update my withdrawal credentials?
+
+   No. You can update your withdrawal credentials **anytime**. The catch is that as long as you do not update your withdrawal credentials, your rewards will remain locked in the beacon chain. Only after you update the withdrawal credentials will the rewards be withdrawn to the withdrawal address.
+
+3. Do I have to do anything to get my rewards after I update the withdrawal credentials to type `0x01`?
+
+   No. The "validator sweep" occurs automatically and you can expect to receive the rewards every few days.
+
+   The figure below summarizes partial withdrawals.
+
+   ![partial](./imgs/partial-withdrawal.png)
\ No newline at end of file
diff --git a/book/src/run_a_node.md b/book/src/run_a_node.md
index fb112c36753..3affa6ab36b 100644
--- a/book/src/run_a_node.md
+++ b/book/src/run_a_node.md
@@ -1,77 +1,43 @@
 # Run a Node

-This document provides detail for users who want to run a Lighthouse beacon node.
+This section provides details for users who want to run a Lighthouse beacon node.
 You should be finished with one [Installation](./installation.md) method of your choice to continue with the following steps:

-1. Set up an [execution node](#step-1-set-up-an-execution-node);
-1. Enable [checkpoint sync](#step-2-choose-a-checkpoint-sync-provider);
-1. Run [Lighthouse](#step-3-run-lighthouse);
-1. [Check logs](#step-4-check-logs); and
-1. [Further readings](#step-5-further-readings).
+1.
Create a [JWT secret file](#step-1-create-a-jwt-secret-file) +1. Set up an [execution node](#step-2-set-up-an-execution-node); +1. Set up a [beacon node](#step-3-set-up-a-beacon-node-using-lighthouse); +1. [Check logs for sync status](#step-4-check-logs); -Checkpoint sync is *optional*; however, we recommend it since it is substantially faster -than syncing from genesis while still providing the same functionality. -## Step 1: Set up an execution node -The Lighthouse beacon node *must* connect to an execution engine in order to validate the transactions -present in blocks. Two flags are used to configure this connection: +## Step 1: Create a JWT secret file +A JWT secret file is used to secure the communication between the execution client and the consensus client. In this step, we will create a JWT secret file which will be used in later steps. -- `--execution-endpoint`: the *URL* of the execution engine API. Often this will be - `http://localhost:8551`. -- `--execution-jwt`: the *path* to the file containing the JWT secret shared by Lighthouse and the - execution engine. This is a mandatory form of authentication that ensures that Lighthouse -has authority to control the execution engine. - -Each execution engine has its own flags for configuring the engine API and JWT. -Please consult the relevant page of your execution engine for the required flags: - -- [Geth: Connecting to Consensus Clients](https://geth.ethereum.org/docs/getting-started/consensus-clients) -- [Nethermind: Running Nethermind & CL](https://docs.nethermind.io/nethermind/first-steps-with-nethermind/running-nethermind-post-merge) -- [Besu: Connect to Mainnet](https://besu.hyperledger.org/en/stable/public-networks/get-started/connect/mainnet/) -- [Erigon: Beacon Chain (Consensus Layer)](https://github.com/ledgerwatch/erigon#beacon-chain-consensus-layer) - -The execution engine connection must be *exclusive*, i.e. you must have one execution node -per beacon node. The reason for this is that the beacon node _controls_ the execution node. - -## Step 2: Choose a checkpoint sync provider - -Lighthouse supports fast sync from a recent finalized checkpoint. -The checkpoint sync is done using a [public endpoint](#use-a-community-checkpoint-sync-endpoint) -provided by the Ethereum community. - -In [step 3](#step-3-run-lighthouse), when running Lighthouse, -we will enable checkpoint sync by providing the URL to the `--checkpoint-sync-url` flag. +```bash +sudo mkdir -p /secrets +openssl rand -hex 32 | tr -d "\n" | sudo tee /secrets/jwt.hex +``` -### Use a community checkpoint sync endpoint +## Step 2: Set up an execution node -The Ethereum community provides various [public endpoints](https://eth-clients.github.io/checkpoint-sync-endpoints/) for you to choose from for your initial checkpoint state. Select one for your network and use it as the URL. +The Lighthouse beacon node *must* connect to an execution engine in order to validate the transactions present in blocks. The execution engine connection must be *exclusive*, i.e. you must have one execution node +per beacon node. The reason for this is that the beacon node _controls_ the execution node. Select an execution client from the list below and run it: -For example, the URL for Sigma Prime's checkpoint sync server for mainnet is `https://mainnet.checkpoint.sigp.io`, -which we will use in [step 3](#step-3-run-lighthouse). 
-## Step 3: Run Lighthouse
+- [Nethermind](https://docs.nethermind.io/nethermind/first-steps-with-nethermind/running-nethermind-post-merge)
+- [Besu](https://besu.hyperledger.org/en/stable/public-networks/get-started/connect/mainnet/)
+- [Erigon](https://github.com/ledgerwatch/erigon#beacon-chain-consensus-layer)
+- [Geth](https://geth.ethereum.org/docs/getting-started/consensus-clients)
-To run Lighthouse, we use the three flags from the steps above:
-- `--execution-endpoint`;
-- `--execution-jwt`; and
-- `--checkpoint-sync-url`.
-Additionally, we run Lighthouse with the `--network` flag, which selects a network:
+> Note: Each execution engine has its own flags for configuring the engine API and JWT secret to connect to a beacon node. Please consult the relevant page of your execution engine (linked above) for the required flags.
-- `lighthouse` (no flag): Mainnet.
-- `lighthouse --network mainnet`: Mainnet.
-- `lighthouse --network goerli`: Goerli (testnet).
-Using the correct `--network` flag is very important; using the wrong flag can
-result in penalties, slashings or lost deposits. As a rule of thumb, *always*
-provide a `--network` flag instead of relying on the default.
+Once the execution client is up, just let it continue running. The execution client will start syncing when it connects to a beacon node. Depending on the execution client and computer hardware specifications, syncing can take from a few hours to a few days. You can safely proceed to Step 3 to set up a beacon node while the execution client is still syncing.
-For the testnets we support [Goerli](https://goerli.net/) (`--network goerli`),
-[Sepolia](https://sepolia.dev/) (`--network sepolia`), and [Gnosis chain](https://www.gnosis.io/) (`--network gnosis`).
+## Step 3: Set up a beacon node using Lighthouse
-Minor modifications depend on if you want to run your node while [staking](#staking) or [non-staking](#non-staking).
-In the following, we will provide examples of what a Lighthouse setup could look like.
+In this step, we will set up a beacon node. Use the following command to start a beacon node that connects to the execution node:

 ### Staking

@@ -84,9 +50,30 @@ lighthouse bn \
   --http
 ```

-A Lighthouse beacon node can be configured to expose an HTTP server by supplying the `--http` flag.
-The default listen address is `127.0.0.1:5052`.
-The HTTP API is required for the beacon node to accept connections from the *validator client*, which manages keys.
+> Note: If you downloaded the binary file directly, you need to navigate to the directory containing the binary to run the above command.
+
+Notable flags:
+- `--network` flag, which selects a network:
+  - `lighthouse` (no flag): Mainnet.
+  - `lighthouse --network mainnet`: Mainnet.
+  - `lighthouse --network goerli`: Goerli (testnet).
+  - `lighthouse --network sepolia`: Sepolia (testnet).
+  - `lighthouse --network gnosis`: Gnosis chain.
+
+  > Note: Using the correct `--network` flag is very important; using the wrong flag can
+result in penalties, slashings or lost deposits. As a rule of thumb, *always*
+provide a `--network` flag instead of relying on the default.
+- `--execution-endpoint`: the URL of the execution engine API. If the execution engine is running on the same computer with the default port, this will be
+  `http://localhost:8551`.
+- `--execution-jwt`: the path to the JWT secret file shared by Lighthouse and the
+  execution engine. This is a mandatory form of authentication which ensures that Lighthouse has the authority to control the execution engine.
+- `--checkpoint-sync-url`: Lighthouse supports fast sync from a recent finalized checkpoint. Checkpoint sync is *optional*; however, we **highly recommend** it since it is substantially faster than syncing from genesis while still providing the same functionality. The checkpoint sync is done using [public endpoints](https://eth-clients.github.io/checkpoint-sync-endpoints/) provided by the Ethereum community. For example, in the above command, we use the URL for Sigma Prime's checkpoint sync server for mainnet, `https://mainnet.checkpoint.sigp.io`.
+- `--http`: to expose the HTTP API server of the beacon node. The default listening address is `http://localhost:5052`. The HTTP API is required for the beacon node to accept connections from the *validator client*, which manages keys.
+
+
+
+If you intend to run the beacon node without running the validator client (e.g., to support the network without staking), you can modify the above command as follows:
+

 ### Non-staking

@@ -99,17 +86,19 @@ lighthouse bn \
   --disable-deposit-contract-sync
 ```

-Since we are not staking, we can use the `--disable-deposit-contract-sync` flag.
+Since we are not staking, we can use the `--disable-deposit-contract-sync` flag to disable syncing of deposit logs from the execution node.
+
----
 Once Lighthouse runs, we can monitor the logs to see if it is syncing correctly.

-## Step 4: Check logs
+
+
+## Step 4: Check logs for sync status

 Several logs help you identify if Lighthouse is running correctly.

 ### Logs - Checkpoint sync

-Lighthouse will print a message to indicate that checkpoint sync is being used:
+If you run Lighthouse with the flag `--checkpoint-sync-url`, Lighthouse will print a message to indicate that checkpoint sync is being used:

 ```
 INFO Starting checkpoint sync remote_url: http://remote-bn:8000/, service: beacon
 ```

@@ -122,16 +111,17 @@ loaded from the remote beacon node:
 INFO Loaded checkpoint block and state state_root: 0xe8252c68784a8d5cc7e5429b0e95747032dd1dcee0d1dc9bdaf6380bf90bc8a6, block_root: 0x5508a20147299b1a7fe9dbea1a8b3bf979f74c52e7242039bd77cbff62c0695a, slot: 2034720, service: beacon
 ```

-Once the checkpoint is loaded Lighthouse will sync forwards to the head of the chain.
+Once the checkpoint is loaded, Lighthouse will sync forwards to the head of the chain.
+
+If a validator client is connected to the beacon node, it will be able to start its duties as soon as forwards sync completes, which typically takes 1-2 minutes.

-If a validator client is connected to the node then it will be able to start completing its duties
-as soon as forwards sync completes.
+> Note: If you have an existing Lighthouse database, you will need to delete it for checkpoint sync to run, either by using the `--purge-db` flag or by manually deleting the database with `sudo rm -r /path_to_database/beacon`. If you use the `--purge-db` flag, you can remove it on a later restart once checkpoint sync is complete.

 > **Security Note**: You should cross-reference the `block_root` and `slot` of the loaded checkpoint
 > against a trusted source like another [public endpoint](https://eth-clients.github.io/checkpoint-sync-endpoints/),
 > a friend's node, or a block explorer.

-#### Backfilling Blocks
+### Backfilling Blocks

 Once forwards sync completes, Lighthouse will commence a "backfill sync" to download the blocks from the checkpoint back to genesis.
@@ -156,16 +146,17 @@ as `verified` indicating that they have been processed successfully by the execu INFO Synced, slot: 3690668, block: 0x1244…cb92, epoch: 115333, finalized_epoch: 115331, finalized_root: 0x0764…2a3d, exec_hash: 0x929c…1ff6 (verified), peers: 78 ``` +Once you see the above message - congratulations! This means that your node is synced and you have contributed to the decentralization and security of the Ethereum network. -## Step 5: Further readings +## Further readings Several other resources are the next logical step to explore after running your beacon node: -- Learn how to [become a validator](./mainnet-validator.md); +- If you intend to run a validator, proceed to [become a validator](./mainnet-validator.md); - Explore how to [manage your keys](./key-management.md); - Research on [validator management](./validator-management.md); - Dig into the [APIs](./api.md) that the beacon node and validator client provide; - Study even more about [checkpoint sync](./checkpoint-sync.md); or - Investigate what steps had to be taken in the past to execute a smooth [merge migration](./merge-migration.md). -Finally, if you a struggling with anything, join our [Discord](https://discord.gg/cyAszAh). We are happy to help! \ No newline at end of file +Finally, if you are struggling with anything, join our [Discord](https://discord.gg/cyAszAh). We are happy to help! diff --git a/book/src/slashing-protection.md b/book/src/slashing-protection.md index a60c8e36dc2..6e2ca65b416 100644 --- a/book/src/slashing-protection.md +++ b/book/src/slashing-protection.md @@ -21,8 +21,8 @@ and carefully to keep your validators safe. See the [Troubleshooting](#troublesh The database will be automatically created, and your validators registered with it when: -* Importing keys from another source (e.g. Launchpad, Teku, Prysm, `ethdo`). - See [the docs on importing keys](./validator-import-launchpad.md). +* Importing keys from another source (e.g. [staking-deposit-cli](https://github.com/ethereum/staking-deposit-cli/releases), Lodestar, Nimbus, Prysm, Teku, [ethdo](https://github.com/wealdtech/ethdo)). + See [import validator keys](./mainnet-validator.md#step-3-import-validator-keys-to-lighthouse). * Creating keys using Lighthouse itself (`lighthouse account validator create`) * Creating keys via the [validator client API](./api-vc.md). @@ -45,7 +45,7 @@ Examples of circumstances where the slashing protection database is effective ar your client to be imported into Lighthouse's slashing protection database. See [Import and Export](#import-and-export). * Misplacing `slashing_protection.sqlite` during a datadir change or migration between machines. - By default Lighthouse will refuse to start if it finds validator keys that are not registered + By default, Lighthouse will refuse to start if it finds validator keys that are not registered in the slashing protection database. Examples where it is **ineffective** are: @@ -54,7 +54,7 @@ Examples where it is **ineffective** are: clients (e.g. Lighthouse and Prysm) running on the same machine, two Lighthouse instances using different datadirs, or two clients on completely different machines (e.g. one on a cloud server and one running locally). You are responsible for ensuring that your validator keys are never - running simultaneously – the slashing protection DB **cannot protect you in this case**. + running simultaneously – the slashing protection database **cannot protect you in this case**. 
* Importing keys from another client without also importing voting history. * If you use `--init-slashing-protection` to recreate a missing slashing protection database. @@ -64,19 +64,22 @@ Lighthouse supports the slashing protection interchange format described in [EIP interchange file is a record of blocks and attestations signed by a set of validator keys – basically a portable slashing protection database! -With your validator client stopped, you can import a `.json` interchange file from another client +To import a slashing protection database to Lighthouse, you first need to export your existing client's database. Instructions to export the slashing protection database for other clients are listed below: +- [Lodestar](https://chainsafe.github.io/lodestar/reference/cli/#validator-slashing-protection-export) +- [Nimbus](https://nimbus.guide/migration.html#2-export-slashing-protection-history) +- [Prysm](https://docs.prylabs.network/docs/wallet/slashing-protection#exporting-your-validators-slashing-protection-history) +- [Teku](https://docs.teku.consensys.net/HowTo/Prevent-Slashing#export-a-slashing-protection-file) + + +Once you have the slashing protection database from your existing client, you can now import the database to Lighthouse. With your validator client stopped, you can import a `.json` interchange file from another client using this command: ```bash lighthouse account validator slashing-protection import ``` -Instructions for exporting your existing client's database are out of scope for this document, -please check the other client's documentation for instructions. - When importing an interchange file, you still need to import the validator keystores themselves -separately, using the instructions for [importing keystores into -Lighthouse](./validator-import-launchpad.md). +separately, using the instructions for [import validator keys](./mainnet-validator.md#step-3-import-validator-keys-to-lighthouse). --- @@ -93,7 +96,7 @@ up to date. ### How Import Works -Since version 1.6.0 Lighthouse will ignore any slashable data in the import data and will safely +Since version 1.6.0, Lighthouse will ignore any slashable data in the import data and will safely update the low watermarks for blocks and attestations. It will store only the maximum-slot block for each validator, and the maximum source/target attestation. This is faster than importing all data while also being more resilient to repeated imports & stale data. @@ -121,7 +124,7 @@ Oct 12 14:41:26.415 CRIT Failed to start validator client reason: Failed Ensure that `slashing_protection.sqlite` is in "/home/karlm/.lighthouse/mainnet/validators" folder ``` -Usually this indicates that during some manual intervention the slashing database has been +Usually this indicates that during some manual intervention, the slashing database has been misplaced. This error can also occur if you have upgraded from Lighthouse v0.2.x to v0.3.x without moving the slashing protection database. If you have imported your keys into a new node, you should never see this error (see [Initialization](#initialization)). @@ -137,7 +140,7 @@ the Lighthouse validator client with the `--init-slashing-protection` flag. This dangerous and should not be used lightly, and we **strongly recommend** you try finding your old slashing protection database before using it. If you do decide to use it, you should wait at least 1 epoch (~7 minutes) from when your validator client was last actively signing -messages. 
If you suspect your node experienced a clock drift issue you should wait
+messages. If you suspect your node experienced a clock drift issue, you should wait
 longer. Remember that the inactivity penalty for being offline for even a day or so is approximately
 equal to the rewards earned in a day. You will get slashed if you use `--init-slashing-protection`
 incorrectly.
diff --git a/book/src/suggested-fee-recipient.md b/book/src/suggested-fee-recipient.md
index c1739aa9378..f3ece850621 100644
--- a/book/src/suggested-fee-recipient.md
+++ b/book/src/suggested-fee-recipient.md
@@ -1,14 +1,12 @@
 # Suggested Fee Recipient

 The _fee recipient_ is an Ethereum address nominated by a beacon chain validator to receive
-tips from user transactions. If you run validators on a network that has already merged
-or is due to merge soon then you should nominate a fee recipient for your validators.
+tips from user transactions. Given that mainnet and all testnets have gone through [The Merge](https://ethereum.org/en/roadmap/merge/), if you run validators on any network, we strongly recommend that you nominate a fee recipient for your validators. Failing to nominate a fee recipient will result in losing the tips from transactions.

 ## Background

 During post-merge block production, the Beacon Node (BN) will provide a `suggested_fee_recipient` to
-the execution node. This is a 20-byte Ethereum address which the EL might choose to set as the
-coinbase and the recipient of other fees or rewards.
+the execution node. This is a 20-byte Ethereum address which the execution node might choose to set as the recipient of other fees or rewards.

 There is no guarantee that an execution node will use the `suggested_fee_recipient` to collect fees,
 it may use any address it chooses. It is assumed that an honest execution node *will* use the
@@ -189,4 +187,4 @@ accumulates other staking rewards. The reason for this is that transaction fees
 validated by the execution node, and therefore need to be paid to an address that exists on the
 execution chain. Validators use BLS keys which do not correspond to Ethereum addresses, so they have
 no "presence" on the execution chain. Therefore, it's necessary for each validator to nominate
-a separate fee recipient address.
+a fee recipient address.
diff --git a/book/src/testnet-validator.md b/book/src/testnet-validator.md
deleted file mode 100644
index 98ba66c2445..00000000000
--- a/book/src/testnet-validator.md
+++ /dev/null
@@ -1,23 +0,0 @@
-# Become a Testnet Validator
-
-[mainnet-validator]: ./mainnet-validator.md
-[prater-launchpad]: https://prater.launchpad.ethereum.org/
-
-Joining an Ethereum consensus testnet is a great way to get familiar with staking in Phase 0. All users should
-experiment with a testnet prior to staking mainnet ETH.
-
-To join a testnet, you can follow the [Become an Ethereum consensus Mainnet Validator][mainnet-validator]
-instructions but with a few differences:
-
-1. Use the appropriate Staking launchpad website:
-   - [Prater][prater-launchpad]
-1. Instead of `--network mainnet`, use the appropriate network flag:
-   - `--network prater`: Prater.
-1. Use a Goerli execution node instead of a mainnet one:
-   - For Geth, this means using `geth --goerli --http`.
-1. Notice that Lighthouse will store its files in a different directory by default:
-   - `~/.lighthouse/prater`: Prater.
-
->
-> **Never use real ETH to join a testnet!** All of the testnets listed here use Goerli ETH which is
-> basically worthless. This allows experimentation without real-world costs.
diff --git a/book/src/validator-create.md b/book/src/validator-create.md deleted file mode 100644 index f13c449b9f8..00000000000 --- a/book/src/validator-create.md +++ /dev/null @@ -1,90 +0,0 @@ -# Create a validator - -[launchpad]: https://launchpad.ethereum.org/ - -> -> **Note: we recommend using the [Staking launchpad][launchpad] to create validators.** - -Validators are fundamentally represented by a BLS keypair. In Lighthouse, we -use a [wallet](./wallet-create.md) to generate these keypairs. Once a wallet -exists, the `lighthouse account validator create` command is used to generate -the BLS keypair and all necessary information to submit a validator deposit and -have that validator operate in the `lighthouse validator_client`. - -## Usage - -To create a validator from a [wallet](./wallet-create.md), use the `lighthouse -account validator create` command: - -```bash -lighthouse account validator create --help - -Creates new validators from an existing EIP-2386 wallet using the EIP-2333 HD key derivation scheme. - -USAGE: - lighthouse account_manager validator create [FLAGS] [OPTIONS] - -FLAGS: - -h, --help Prints help information - --stdin-inputs If present, read all user inputs from stdin instead of tty. - --store-withdrawal-keystore If present, the withdrawal keystore will be stored alongside the voting keypair. - It is generally recommended to *not* store the withdrawal key and instead - generate them from the wallet seed when required. - -V, --version Prints version information - -OPTIONS: - --at-most - Observe the number of validators in --validator-dir, only creating enough to reach the given count. Never - deletes an existing validator. - --count - The number of validators to create, regardless of how many already exist - - -d, --datadir - Used to specify a custom root data directory for lighthouse keys and databases. Defaults to - $HOME/.lighthouse/{network} where network is the value of the `network` flag Note: Users should specify - separate custom datadirs for different networks. - --debug-level - The verbosity level for emitting logs. [default: info] [possible values: info, debug, trace, warn, error, - crit] - --deposit-gwei - The GWEI value of the deposit amount. Defaults to the minimum amount required for an active validator - (MAX_EFFECTIVE_BALANCE) - --network - Name of the Eth2 chain Lighthouse will sync and follow. [default: mainnet] [possible values: prater, mainnet] - --secrets-dir - The path where the validator keystore passwords will be stored. Defaults to ~/.lighthouse/{network}/secrets - - -s, --spec - This flag is deprecated, it will be disallowed in a future release. This value is now derived from the - --network or --testnet-dir flags. - -t, --testnet-dir - Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective - if there is no existing database. - --wallet-name Use the wallet identified by this name - --wallet-password - A path to a file containing the password which will unlock the wallet. - - --wallets-dir - A path containing Eth2 EIP-2386 wallets. Defaults to ~/.lighthouse/{network}/wallets -``` - -## Example - -The example assumes that the `wally` wallet was generated from the -[wallet](./wallet-create.md) example. 
-
-```bash
-lighthouse --network prater account validator create --wallet-name wally --wallet-password wally.pass --count 1
-```
-
-This command will:
-
-- Derive a single new BLS keypair from wallet `wally` in `~/.lighthouse/{network}/wallets`, updating it so that it generates a
-  new key next time.
-- Create a new directory in `~/.lighthouse/{network}/validators` containing:
-  - An encrypted keystore containing the validators voting keypair.
-  - An `eth1_deposit_data.rlp` assuming the default deposit amount (`32 ETH`
-    for most testnets and mainnet) which can be submitted to the deposit
-    contract for the Prater testnet. Other testnets can be set via the
-    `--network` CLI param.
-- Store a password to the validators voting keypair in `~/.lighthouse/{network}/secrets`.
diff --git a/book/src/validator-doppelganger.md b/book/src/validator-doppelganger.md
index d880cce0ae4..6eaddcc7b0b 100644
--- a/book/src/validator-doppelganger.md
+++ b/book/src/validator-doppelganger.md
@@ -16,8 +16,7 @@ achieves this by staying silent for 2-3 epochs after a validator is started so i
 other instances of that validator before starting to sign potentially slashable messages.

 > Note: Doppelganger Protection is not yet interoperable, so if it is configured on a Lighthouse
-> validator client, the client must be connected to a Lighthouse beacon node. Because Infura
-> uses Teku, Lighthouse's Doppelganger Protection cannot yet be used with Infura's Eth2 service.
+> validator client, the client must be connected to a Lighthouse beacon node.

 ## Initial Considerations

@@ -30,9 +29,9 @@ is no guarantee that your Beacon Node (BN) will see messages from it. **It is fe
 doppelganger protection to fail to detect another validator due to network faults or other common
 circumstances.**

-DP should be considered a last-line-of-defence that *might* save a validator from being slashed due
+DP should be considered a last line of defence that *might* save a validator from being slashed due
 to operator error (i.e. running two instances of the same validator). Users should
-*never* rely upon DP and should practice the same caution with regards to duplicating validators as
+*never* rely upon DP and should practice the same caution with regard to duplicating validators as
 if it did not exist.

 **Remember: even with doppelganger protection enabled, it is not safe to run two instances of the

@@ -44,7 +43,7 @@ DP works by staying silent on the network for 2-3 epochs before starting to sign
 Staying silent and refusing to sign messages will cause the following:

 - 2-3 missed attestations, incurring penalties and missed rewards.
-- 2-3 epochs of missed sync committee contributions (if the validator is in a sync committee, which is unlikely), incurring penalties and missed rewards (post-Altair upgrade only).
+- 2-3 epochs of missed sync committee contributions (if the validator is in a sync committee, which is unlikely), incurring penalties and missed rewards.
 - Potentially missed rewards by missing a block proposal (if the validator is an elected block
   proposer, which is unlikely).

@@ -105,7 +104,7 @@ there is no other instance of that validator running elsewhere!**
 The steps to solving a doppelganger vary depending on the case, but some places to check are:

 1. Is there another validator process running on this host?
-   - Unix users can check `ps aux | grep lighthouse`
+   - Unix users can check by running the command `ps aux | grep lighthouse`
   - Windows users can check the Task Manager.
 1.
Has this validator recently been moved from another host? Check to ensure it's not running. 1. Has this validator been delegated to a staking service? diff --git a/book/src/validator-import-launchpad.md b/book/src/validator-import-launchpad.md deleted file mode 100644 index 9849b91b70a..00000000000 --- a/book/src/validator-import-launchpad.md +++ /dev/null @@ -1,111 +0,0 @@ -# Importing from the Ethereum Staking Launch pad - -The [Staking Launchpad](https://github.com/ethereum/eth2.0-deposit) is a website -from the Ethereum Foundation which guides users how to use the -[`eth2.0-deposit-cli`](https://github.com/ethereum/eth2.0-deposit-cli) -command-line program to generate consensus validator keys. - -The keys that are generated from `eth2.0-deposit-cli` can be easily loaded into -a Lighthouse validator client (`lighthouse vc`). In fact, both of these -programs are designed to work with each other. - -This guide will show the user how to import their keys into Lighthouse so they -can perform their duties as a validator. The guide assumes the user has already -[installed Lighthouse](./installation.md). - -## Instructions - -Whilst following the steps on the website, users are instructed to download the -[`eth2.0-deposit-cli`](https://github.com/ethereum/eth2.0-deposit-cli) -repository. This `eth2-deposit-cli` script will generate the validator BLS keys -into a `validator_keys` directory. We assume that the user's -present-working-directory is the `eth2-deposit-cli` repository (this is where -you will be if you just ran the `./deposit.sh` script from the Staking Launch pad -website). If this is not the case, simply change the `--directory` to point to -the `validator_keys` directory. - -Now, assuming that the user is in the `eth2-deposit-cli` directory and they're -using the default (`~/.lighthouse/{network}/validators`) `validators` directory (specify a different one using -`--validators-dir` flag), they can follow these steps: - -### 1. Run the `lighthouse account validator import` command. - -Docker users should use the command from the [Docker](#docker) -section, all other users can use: - - -```bash -lighthouse --network mainnet account validator import --directory validator_keys -``` - -Note: The user must specify the consensus client network that they are importing the keys for using the `--network` flag. - - -After which they will be prompted for a password for each keystore discovered: - -``` -Keystore found at "validator_keys/keystore-m_12381_3600_0_0_0-1595406747.json": - - - Public key: 0xa5e8702533f6d66422e042a0bf3471ab9b302ce115633fa6fdc5643f804b6b4f1c33baf95f125ec21969a3b1e0dd9e56 - - UUID: 8ea4cf99-8719-43c5-9eda-e97b8a4e074f - -If you enter a password it will be stored in validator_definitions.yml so that it is not required each time the validator client starts. - -Enter a password, or press enter to omit a password: -``` - -The user can choose whether or not they'd like to store the validator password -in the [`validator_definitions.yml`](./validator-management.md) file. If the -password is *not* stored here, the validator client (`lighthouse vc`) -application will ask for the password each time it starts. This might be nice -for some users from a security perspective (i.e., if it is a shared computer), -however it means that if the validator client restarts, the user will be liable -to off-line penalties until they can enter the password. 
If the user trusts the -computer that is running the validator client and they are seeking maximum -validator rewards, we recommend entering a password at this point. - -Once the process is done the user will see: - -``` -Successfully imported keystore. -Successfully updated validator_definitions.yml. - -Successfully imported 1 validators (0 skipped). - -WARNING: DO NOT USE THE ORIGINAL KEYSTORES TO VALIDATE WITH ANOTHER CLIENT, OR YOU WILL GET SLASHED.. -``` - -The import process is complete! - -### 2. Run the `lighthouse vc` command. - -Now the keys are imported the user can start performing their validator duties -by running `lighthouse vc` and checking that their validator public key appears -as a `voting_pubkey` in one of the following logs: - -``` -INFO Enabled validator voting_pubkey: 0xa5e8702533f6d66422e042a0bf3471ab9b302ce115633fa6fdc5643f804b6b4f1c33baf95f125ec21969a3b1e0dd9e56 -``` - -Once this log appears (and there are no errors) the `lighthouse vc` application -will ensure that the validator starts performing its duties and being rewarded -by the protocol. There is no more input required from the user. - -## Docker - -The `import` command is a little more complex for Docker users, but the example -in this document can be substituted with: - -```bash -docker run -it \ - -v $HOME/.lighthouse:/root/.lighthouse \ - -v $(pwd)/validator_keys:/root/validator_keys \ - sigp/lighthouse \ - lighthouse --network MY_NETWORK account validator import --directory /root/validator_keys -``` - -Here we use two `-v` volumes to attach: - -- `~/.lighthouse` on the host to `/root/.lighthouse` in the Docker container. -- The `validator_keys` directory in the present working directory of the host - to the `/root/validator_keys` directory of the Docker container. diff --git a/book/src/validator-management.md b/book/src/validator-management.md index b7d4442de3d..be34fef2c3c 100644 --- a/book/src/validator-management.md +++ b/book/src/validator-management.md @@ -1,10 +1,10 @@ # Validator Management The `lighthouse vc` command starts a *validator client* instance which connects -to a beacon node performs the duties of a staked validator. +to a beacon node to perform the duties of a staked validator. This document provides information on how the validator client discovers the -validators it will act for and how it should obtain their cryptographic +validators it will act for and how it obtains their cryptographic signatures. Users that create validators using the `lighthouse account` tool in the @@ -49,7 +49,7 @@ Each permitted field of the file is listed below for reference: - `enabled`: A `true`/`false` indicating if the validator client should consider this validator "enabled". - `voting_public_key`: A validator public key. -- `type`: How the validator signs messages (currently restricted to `local_keystore`). +- `type`: How the validator signs messages (this can be `local_keystore` or `web3signer` (see [Web3Signer](./validator-web3signer.md))). - `voting_keystore_path`: The path to a EIP-2335 keystore. - `voting_keystore_password_path`: The path to the password for the EIP-2335 keystore. - `voting_keystore_password`: The password to the EIP-2335 keystore. @@ -59,7 +59,7 @@ Each permitted field of the file is listed below for reference: ## Populating the `validator_definitions.yml` file -When validator client starts and the `validator_definitions.yml` file doesn't +When a validator client starts and the `validator_definitions.yml` file doesn't exist, a new file will be created. 
If the `--disable-auto-discover` flag is provided, the new file will be empty and the validator client will not start any validators. If the `--disable-auto-discover` flag is **not** provided, an @@ -71,7 +71,7 @@ recap: ### Automatic validator discovery -When the `--disable-auto-discover` flag is **not** provided, the validator will search the +When the `--disable-auto-discover` flag is **not** provided, the validator client will search the `validator-dir` for validators and add any *new* validators to the `validator_definitions.yml` with `enabled: true`. @@ -89,7 +89,7 @@ name identical to the `voting_public_key` value. #### Discovery Example -Lets assume the following directory structure: +Let's assume the following directory structure: ``` ~/.lighthouse/{network}/validators @@ -158,7 +158,7 @@ start. If a validator client were to start using the [first example `validator_definitions.yml` file](#example) it would print the following log, -acknowledging there there are two validators and one is disabled: +acknowledging there are two validators and one is disabled: ``` INFO Initialized validators enabled: 1, disabled: 1 @@ -180,8 +180,8 @@ should not be opened by another process. 1. Proceed to act for that validator, creating blocks and attestations if/when required. If there is an error during any of these steps (e.g., a file is missing or -corrupt) the validator client will log an error and continue to attempt to +corrupt), the validator client will log an error and continue to attempt to process other validators. -When the validator client exits (or the validator is deactivated) it will +When the validator client exits (or the validator is deactivated), it will remove the `voting-keystore.json.lock` to indicate that the keystore is free for use again. diff --git a/book/src/validator-monitoring.md b/book/src/validator-monitoring.md index 9074bc0273e..893ec90bdd3 100644 --- a/book/src/validator-monitoring.md +++ b/book/src/validator-monitoring.md @@ -38,7 +38,7 @@ minutes after start up. #### Example ``` -lighthouse bn --staking --validator-monitor-auto +lighthouse bn --http --validator-monitor-auto ``` ### Manual diff --git a/book/src/voluntary-exit.md b/book/src/voluntary-exit.md index 591b7d49109..d90395c07fc 100644 --- a/book/src/voluntary-exit.md +++ b/book/src/voluntary-exit.md @@ -1,7 +1,7 @@ -# Voluntary exits +# Voluntary Exits (Full Withdrawals) A validator may chose to voluntarily stop performing duties (proposing blocks and attesting to blocks) by submitting -a voluntary exit transaction to the beacon chain. +a voluntary exit message to the beacon chain. A validator can initiate a voluntary exit provided that the validator is currently active, has not been slashed and has been active for at least 256 epochs (~27 hours) since it has been activated. @@ -10,21 +10,15 @@ A validator can initiate a voluntary exit provided that the validator is current It takes at a minimum 5 epochs (32 minutes) for a validator to exit after initiating a voluntary exit. This number can be much higher depending on how many other validators are queued to exit. -## Withdrawal of exited funds - -In order to be eligible for fund withdrawal, the validator must have set a withdrawal address and fully exited the network. - -For more information on how fund withdrawal works, please visit - ## Initiating a voluntary exit In order to initiate an exit, users can use the `lighthouse account validator exit` command. 
-- The `--keystore` flag is used to specify the path to the EIP-2335 voting keystore for the validator. +- The `--keystore` flag is used to specify the path to the EIP-2335 voting keystore for the validator. The path should point directly to the validator key `.json` file, _not_ the folder containing the `.json` file. - The `--beacon-node` flag is used to specify a beacon chain HTTP endpoint that conforms to the [Beacon Node API](https://ethereum.github.io/beacon-APIs/) specifications. That beacon node will be used to validate and propagate the voluntary exit. The default value for this flag is `http://localhost:5052`. -- The `--network` flag is used to specify a particular Eth2 network (default is `mainnet`). +- The `--network` flag is used to specify the network (default is `mainnet`). - The `--password-file` flag is used to specify the path to the file containing the password for the voting keystore. If this flag is not provided, the user will be prompted to enter the password. @@ -36,13 +30,13 @@ The exit phrase is the following: -Below is an example for initiating a voluntary exit on the Prater testnet. +Below is an example for initiating a voluntary exit on the Goerli testnet. ``` -$ lighthouse --network prater account validator exit --keystore /path/to/keystore --beacon-node http://localhost:5052 +$ lighthouse --network goerli account validator exit --keystore /path/to/keystore --beacon-node http://localhost:5052 Running account manager for Prater network -validator-dir path: ~/.lighthouse/prater/validators +validator-dir path: ~/.lighthouse/goerli/validators Enter the keystore password for validator in 0xabcd @@ -52,6 +46,8 @@ Publishing a voluntary exit for validator 0xabcd WARNING: WARNING: THIS IS AN IRREVERSIBLE OPERATION + + PLEASE VISIT https://lighthouse-book.sigmaprime.io/voluntary-exit.html TO MAKE SURE YOU UNDERSTAND THE IMPLICATIONS OF A VOLUNTARY EXIT. @@ -65,3 +61,47 @@ Please keep your validator running till exit epoch Exit epoch in approximately 1920 secs ``` +## Full withdrawal of staked funds + +After the [Capella](https://ethereum.org/en/history/#capella) upgrade on 12th April 2023, if a user initiates a voluntary exit, they will receive the full staked funds at the withdrawal address, provided that the validator has withdrawal credentials of type `0x01`. For more information on how fund withdrawal works, please visit the [Ethereum.org](https://ethereum.org/en/staking/withdrawals/#how-do-withdrawals-work) website. + +## FAQ + +### 1. How do I know if I have withdrawal credentials of type `0x01`? + +There are two types of withdrawal credentials, `0x00` and `0x01`. To check which type your validator has, go to [Staking launchpad](https://launchpad.ethereum.org/en/withdrawals), enter your validator index and click `verify on mainnet`: + +- `withdrawals enabled` means your validator is of type `0x01`, and you will automatically receive the full withdrawal to the withdrawal address that you set. +- `withdrawals not enabled` means your validator is of type `0x00`, and you will need to update your withdrawal credentials from `0x00` type to `0x01` type (also known as BLS-to-execution-change, or BTEC) to receive the staked funds. The common way to do this is to use the `Staking deposit CLI` or `ethdo`, with the instructions available [here](https://launchpad.ethereum.org/en/withdrawals#update-your-keys). + + +### 2. What if my validator is of type `0x00` and I do not update my withdrawal credentials after initiating a voluntary exit?
+ + Your staked funds will remain locked on the beacon chain. You can update your withdrawal credentials **anytime**, and there is no deadline for that. However, as long as you do not update your withdrawal credentials, your staked funds will remain locked on the beacon chain. Only after you update the withdrawal credentials will the staked funds be withdrawn to the withdrawal address. + +### 3. How many times can I update my withdrawal credentials? + + If your withdrawal credentials are of type `0x00`, you can only update them once to type `0x01`. It is therefore very important to ensure that the withdrawal address you set is an address under your control, preferably an address controlled by a hardware wallet. + + If your withdrawal credentials are of type `0x01`, it means you have set your withdrawal address previously, and you will not be able to change the withdrawal address. + +### 4. When will my BTEC request (update withdrawal credentials to type `0x01`) be processed? + + Your BTEC request will be included as soon as a new block is proposed. This should be the case most (if not all) of the time, given that the peak BTEC request period has now passed (it occurred right after the [Capella](https://ethereum.org/en/history/#capella) upgrade on 12th April 2023 and lasted for ~2 days). + +### 5. When will I get my staked funds after a voluntary exit if my validator is of type `0x01`? + + There are three waiting periods before you receive the staked funds at your withdrawal address: + + - The exit queue: a varying period that takes a minimum of 5 epochs (32 minutes) if there is no queue. If many validators are exiting at the same time, the queue can stretch from hours to weeks, depending on the number of validators waiting to exit. During this time your validator has to stay online to perform its duties to avoid penalties. + + - A fixed waiting period of 256 epochs (27.3 hours) for the validator's status to become withdrawable. + + - A varying period of "validator sweep" that can take up to 5 days (at the time of writing, with ~560,000 validators on mainnet). The "validator sweep" is the process of skimming through all validators by index number for eligible withdrawals (those with type `0x01` credentials and a balance above 32 ETH). Once the "validator sweep" reaches your validator's index, your staked funds will be fully withdrawn to the withdrawal address you set. + + The total time taken is the sum of the above three waiting periods; after they have elapsed, you will receive the staked funds at your withdrawal address. + +The voluntary exit and full withdrawal process is summarized in the figure below. + +![full](./imgs/full-withdrawal.png) + diff --git a/book/src/wallet-create.md b/book/src/wallet-create.md deleted file mode 100644 index 25cac8d34dd..00000000000 --- a/book/src/wallet-create.md +++ /dev/null @@ -1,74 +0,0 @@ -# Create a wallet - -[launchpad]: https://launchpad.ethereum.org/ - -> -> **Note: we recommend using the [Staking launchpad][launchpad] to create validators.** - -A wallet allows for generating practically unlimited validators from an -easy-to-remember 24-word string (a mnemonic). As long as that mnemonic is -backed up, all validator keys can be trivially re-generated. - -The 24-word string is randomly generated during wallet creation and printed out -to the terminal. It's important to **make one or more backups of the mnemonic** -to ensure your ETH is not lost in the case of data loss.
It is very important to -**keep your mnemonic private** as it represents the ultimate control of your -ETH. - -Whilst the wallet stores the mnemonic, it does not store it in plain-text: the -mnemonic is encrypted with a password. It is the responsibility of the user to -define a strong password. The password is only required for interacting with -the wallet, it is not required for recovering keys from a mnemonic. - -## Usage - -To create a wallet, use the `lighthouse account wallet` command: - -```bash -lighthouse account wallet create --help - -Creates a new HD (hierarchical-deterministic) EIP-2386 wallet. - -USAGE: - lighthouse account_manager wallet create [OPTIONS] --name --password-file - -FLAGS: - -h, --help Prints help information - -V, --version Prints version information - -OPTIONS: - -d, --datadir Data directory for lighthouse keys and databases. - --mnemonic-output-path - If present, the mnemonic will be saved to this file. DO NOT SHARE THE MNEMONIC. - - --name - The wallet will be created with this name. It is not allowed to create two wallets with the same name for - the same --base-dir. - --password-file - A path to a file containing the password which will unlock the wallet. If the file does not exist, a random - password will be generated and saved at that path. To avoid confusion, if the file does not already exist it - must include a '.pass' suffix. - -t, --testnet-dir - Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective - if there is no existing database. - --type - The type of wallet to create. Only HD (hierarchical-deterministic) wallets are supported presently.. - [default: hd] [possible values: hd] -``` - - -## Example - -Creates a new wallet named `wally` and saves it in `~/.lighthouse/prater/wallets` with a randomly generated password saved -to `./wallet.pass`: - -```bash -lighthouse --network prater account wallet create --name wally --password-file wally.pass -``` - -> Notes: -> -> - The password is not `wally.pass`, it is the _contents_ of the -> `wally.pass` file. -> - If `wally.pass` already exists the wallet password will be set to contents -> of that file. diff --git a/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs b/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs index ede5bb39481..aa26a843069 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs @@ -987,11 +987,11 @@ pub fn get_execution_status_test_definition_03() -> ForkChoiceTestDefinition { ops.push(Operation::AssertWeight { block_root: get_root(0), - weight: 33_000, + weight: 33_250, }); ops.push(Operation::AssertWeight { block_root: get_root(1), - weight: 33_000, + weight: 33_250, }); ops.push(Operation::AssertWeight { block_root: get_root(2), @@ -1000,7 +1000,7 @@ pub fn get_execution_status_test_definition_03() -> ForkChoiceTestDefinition { ops.push(Operation::AssertWeight { block_root: get_root(3), // This is a "magic number" generated from `calculate_committee_fraction`. - weight: 31_000, + weight: 31_250, }); // Invalidate the payload of 3. 
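The bumped weights in the fork-choice test above (`33_000` to `33_250`, `31_000` to `31_250`) come from the refactor of `calculate_committee_fraction` in the next hunk, which collapses two truncating integer divisions into a single one. A minimal standalone sketch, using small hypothetical numbers rather than the test's actual balances, shows why the single-division form never rounds down more than once and therefore yields weights at least as large:

```rust
// Hypothetical inputs: 10 validators of 1_000 Gwei effective balance each,
// 8 slots per epoch, and a 100% proposer score boost. None of these values
// are taken from the test vectors above.
fn main() {
    let total_effective_balance: u64 = 10_000;
    let num_active_validators: u64 = 10;
    let slots_per_epoch: u64 = 8;
    let proposer_score_boost: u64 = 100;

    // Old formulation: two truncating divisions compound the rounding error.
    let average_balance = total_effective_balance / num_active_validators; // 1_000
    let committee_size = num_active_validators / slots_per_epoch; // 10 / 8 = 1 (truncated)
    let old_weight = committee_size * average_balance * proposer_score_boost / 100; // 1_000

    // New formulation: divide the total balance once, keeping the fractional
    // validators per slot that the old code threw away.
    let new_weight = total_effective_balance / slots_per_epoch * proposer_score_boost / 100; // 1_250

    assert_eq!(old_weight, 1_000);
    assert_eq!(new_weight, 1_250);
}
```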
diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 2c19206cb75..1cc34beff0c 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -1055,13 +1055,9 @@ pub fn calculate_committee_fraction<E: EthSpec>( justified_balances: &JustifiedBalances, proposer_score_boost: u64, ) -> Option<u64> { - let average_balance = justified_balances + let committee_weight = justified_balances .total_effective_balance - .checked_div(justified_balances.num_active_validators)?; - let committee_size = justified_balances - .num_active_validators .checked_div(E::slots_per_epoch())?; - let committee_weight = committee_size.checked_mul(average_balance)?; committee_weight .checked_mul(proposer_score_boost)? .checked_div(100) diff --git a/lighthouse/build.rs b/lighthouse/build.rs new file mode 100644 index 00000000000..3d8a25ec8c2 --- /dev/null +++ b/lighthouse/build.rs @@ -0,0 +1,2 @@ +// This is a stub for determining the build profile, see `build_profile_name`. +fn main() {} diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 1845113d8ce..2569286bbf6 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -37,6 +37,17 @@ fn allocator_name() -> &'static str { } } +fn build_profile_name() -> String { + // Nice hack from https://stackoverflow.com/questions/73595435/how-to-get-profile-from-cargo-toml-in-build-rs-or-at-runtime + // The profile name is always the 4th from last part of the path, i.e. `nth_back(3)` with 0-based indexing. + // e.g. /code/core/target/cli/build/my-build-info-9f91ba6f99d7a061/out + std::env!("OUT_DIR") + .split(std::path::MAIN_SEPARATOR) + .nth_back(3) + .unwrap_or_else(|| "unknown") + .to_string() +} + fn main() { // Enable backtraces unless a RUST_BACKTRACE value has already been explicitly provided. if std::env::var("RUST_BACKTRACE").is_err() { @@ -58,11 +69,13 @@ fn main() { BLS library: {}\n\ SHA256 hardware acceleration: {}\n\ Allocator: {}\n\ + Profile: {}\n\ Specs: mainnet (true), minimal ({}), gnosis ({})", VERSION.replace("Lighthouse/", ""), bls_library_name(), have_sha_extensions(), allocator_name(), + build_profile_name(), cfg!(feature = "spec-minimal"), cfg!(feature = "gnosis"), ).as_str() diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 9dd67eadc60..7e647c904d7 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -345,6 +345,23 @@ fn trusted_peers_flag() { }); } +#[test] +fn genesis_backfill_flag() { + CommandLineTest::new() + .flag("genesis-backfill", None) + .run_with_zero_port() + .with_config(|config| assert_eq!(config.chain.genesis_backfill, true)); +} + +/// The genesis backfill flag should be enabled if the historic states flag is set.
+#[test] +fn genesis_backfill_with_historic_flag() { + CommandLineTest::new() + .flag("reconstruct-historic-states", None) + .run_with_zero_port() + .with_config(|config| assert_eq!(config.chain.genesis_backfill, true)); +} + #[test] fn always_prefer_builder_payload_flag() { CommandLineTest::new() @@ -1669,6 +1686,25 @@ fn block_cache_size_flag() { .with_config(|config| assert_eq!(config.store.block_cache_size, 4_usize)); } #[test] +fn historic_state_cache_size_flag() { + CommandLineTest::new() + .flag("historic-state-cache-size", Some("4")) + .run_with_zero_port() + .with_config(|config| assert_eq!(config.store.historic_state_cache_size, 4_usize)); +} +#[test] +fn historic_state_cache_size_default() { + use beacon_node::beacon_chain::store::config::DEFAULT_HISTORIC_STATE_CACHE_SIZE; + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.store.historic_state_cache_size, + DEFAULT_HISTORIC_STATE_CACHE_SIZE + ); + }); +} +#[test] fn auto_compact_db_flag() { CommandLineTest::new() .flag("auto-compact-db", Some("false"))
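As an aside on the `build_profile_name` hack added to `lighthouse/src/main.rs` above: the profile is recovered purely from the shape of Cargo's `OUT_DIR`. A self-contained sketch of that parsing, using hypothetical paths and a hard-coded `/` separator in place of `std::path::MAIN_SEPARATOR`:

```rust
// Mirrors the `OUT_DIR` parsing in `build_profile_name`: the profile
// directory always sits four segments from the end of the path.
fn profile_from_out_dir(out_dir: &str) -> &str {
    out_dir.split('/').nth_back(3).unwrap_or("unknown")
}

fn main() {
    // Hypothetical OUT_DIR values, not produced by an actual build.
    let release = "/code/lighthouse/target/release/build/lighthouse-9f91ba6f99d7a061/out";
    assert_eq!(profile_from_out_dir(release), "release");

    // A custom Cargo profile (e.g. `maxperf`) is recovered the same way.
    let custom = "/code/lighthouse/target/maxperf/build/lighthouse-9f91ba6f99d7a061/out";
    assert_eq!(profile_from_out_dir(custom), "maxperf");
}
```

This is also why the two-line stub `build.rs` is needed: Cargo only sets `OUT_DIR` at compile time for crates that have a build script, so without the stub the `std::env!("OUT_DIR")` lookup would fail to compile.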