remove more loggers
ThreeHrSleep committed Sep 22, 2024
1 parent 8d7a009 commit 49745c0
Showing 34 changed files with 58 additions and 192 deletions.
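Every file in this commit follows the same mechanical pattern: a `slog::Logger` that was stored as a struct field and threaded through constructors, trait methods, and helpers is deleted, and call sites lean on globally dispatched logging macros instead (the structured `error!`/`warn!` calls left behind in the diff use `tracing`-style field syntax). A minimal sketch of the before/after shape; `Worker` and `Db` are illustrative stand-ins rather than Lighthouse types, and the `tracing`/`tracing-subscriber` crates are assumptions about the replacement machinery:

```rust
// Sketch only: `Worker` and `Db` are illustrative, not Lighthouse types.
use std::sync::Arc;
use tracing::{error, info};

struct Db;

struct Worker {
    db: Arc<Db>,
    // log: Logger,  <- the field this commit deletes, struct by struct
}

impl Worker {
    // Before: fn new(db: Arc<Db>, log: Logger) -> Self
    fn new(db: Arc<Db>) -> Self {
        Self { db }
    }

    fn run(&self) {
        info!("worker starting");
        if let Err(e) = self.do_work() {
            // Structured fields (`error = ?e`) replace slog's key-value pairs
            // on an explicitly passed logger handle.
            error!(error = ?e, "Worker task failed");
        }
    }

    fn do_work(&self) -> Result<(), String> {
        let _ = &self.db;
        Ok(())
    }
}

fn main() {
    // One subscriber installed at startup replaces a `Logger` clone handed
    // to every component.
    tracing_subscriber::fmt::init();
    Worker::new(Arc::new(Db)).run();
}
```

The file sections below are the per-file instances of this pattern.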
13 changes: 4 additions & 9 deletions beacon_node/beacon_chain/src/builder.rs
@@ -820,12 +820,8 @@ where
})?;

let migrator_config = self.store_migrator_config.unwrap_or_default();
let store_migrator = BackgroundMigrator::new(
store.clone(),
migrator_config,
genesis_block_root,
log.clone(),
);
let store_migrator =
BackgroundMigrator::new(store.clone(), migrator_config, genesis_block_root);

if let Some(slot) = slot_clock.now() {
validator_monitor.process_valid_state(
@@ -970,7 +966,6 @@ where
self.beacon_graffiti,
self.execution_layer,
slot_clock.slot_duration() * E::slots_per_epoch() as u32,
log.clone(),
),
slasher: self.slasher.clone(),
validator_monitor: RwLock::new(validator_monitor),
@@ -1077,8 +1072,7 @@ where
.as_ref()
.ok_or("dummy_eth1_backend requires a log")?;

let backend =
CachingEth1Backend::new(Eth1Config::default(), log.clone(), self.spec.clone())?;
let backend = CachingEth1Backend::new(Eth1Config::default(), self.spec.clone())?;

self.eth1_chain = Some(Eth1Chain::new_dummy(backend));

@@ -1158,6 +1152,7 @@ mod test {
use std::time::Duration;
use store::config::StoreConfig;
use store::{HotColdDB, MemoryStore};

use task_executor::test_utils::TestRuntime;
use types::{EthSpec, MinimalEthSpec, Slot};

38 changes: 8 additions & 30 deletions beacon_node/beacon_chain/src/eth1_chain.rs
@@ -3,7 +3,6 @@ use eth1::{Config as Eth1Config, Eth1Block, Service as HttpService};
use eth2::lighthouse::Eth1SyncStatusData;
use ethereum_hashing::hash;
use int_to_bytes::int_to_bytes32;
use slog::Logger;
use ssz::{Decode, Encode};
use ssz_derive::{Decode, Encode};
use state_processing::per_block_processing::get_new_eth1_data;
@@ -284,11 +283,9 @@ where
pub fn from_ssz_container(
ssz_container: &SszEth1,
config: Eth1Config,
log: &Logger,
spec: ChainSpec,
) -> Result<Self, String> {
let backend =
Eth1ChainBackend::from_bytes(&ssz_container.backend_bytes, config, log.clone(), spec)?;
let backend = Eth1ChainBackend::from_bytes(&ssz_container.backend_bytes, config, spec)?;
Ok(Self {
use_dummy_backend: ssz_container.use_dummy_backend,
backend,
@@ -352,12 +349,7 @@ pub trait Eth1ChainBackend<E: EthSpec>: Sized + Send + Sync {
fn as_bytes(&self) -> Vec<u8>;

/// Create a `Eth1ChainBackend` instance given encoded bytes.
fn from_bytes(
bytes: &[u8],
config: Eth1Config,
log: Logger,
spec: ChainSpec,
) -> Result<Self, String>;
fn from_bytes(bytes: &[u8], config: Eth1Config, spec: ChainSpec) -> Result<Self, String>;
}

/// Provides a simple, testing-only backend that generates deterministic, meaningless eth1 data.
@@ -410,12 +402,7 @@ impl<E: EthSpec> Eth1ChainBackend<E> for DummyEth1ChainBackend<E> {
}

/// Create dummy eth1 backend.
fn from_bytes(
_bytes: &[u8],
_config: Eth1Config,
_log: Logger,
_spec: ChainSpec,
) -> Result<Self, String> {
fn from_bytes(_bytes: &[u8], _config: Eth1Config, _spec: ChainSpec) -> Result<Self, String> {
Ok(Self(PhantomData))
}
}
@@ -434,19 +421,17 @@ impl<E: EthSpec> Default for DummyEth1ChainBackend<E> {
#[derive(Clone)]
pub struct CachingEth1Backend<E: EthSpec> {
pub core: HttpService,
log: Logger,
_phantom: PhantomData<E>,
}

impl<E: EthSpec> CachingEth1Backend<E> {
/// Instantiates `self` with empty caches.
///
/// Does not connect to the eth1 node or start any tasks to keep the cache updated.
pub fn new(config: Eth1Config, log: Logger, spec: ChainSpec) -> Result<Self, String> {
pub fn new(config: Eth1Config, spec: ChainSpec) -> Result<Self, String> {
Ok(Self {
core: HttpService::new(config, log.clone(), spec)
core: HttpService::new(config, spec)
.map_err(|e| format!("Failed to create eth1 http service: {:?}", e))?,
log,
_phantom: PhantomData,
})
}
@@ -459,7 +444,6 @@ impl<E: EthSpec> CachingEth1Backend<E> {
/// Instantiates `self` from an existing service.
pub fn from_service(service: HttpService) -> Self {
Self {
log: service.log.clone(),
core: service,
_phantom: PhantomData,
}
@@ -589,16 +573,10 @@ impl<E: EthSpec> Eth1ChainBackend<E> for CachingEth1Backend<E> {
}

/// Recover the cached backend from encoded bytes.
fn from_bytes(
bytes: &[u8],
config: Eth1Config,
log: Logger,
spec: ChainSpec,
) -> Result<Self, String> {
let inner = HttpService::from_bytes(bytes, config, log.clone(), spec)?;
fn from_bytes(bytes: &[u8], config: Eth1Config, spec: ChainSpec) -> Result<Self, String> {
let inner = HttpService::from_bytes(bytes, config, spec)?;
Ok(Self {
core: inner,
log,
_phantom: PhantomData,
})
}
@@ -749,7 +727,7 @@ mod test {

let log = test_logger();
Eth1Chain::new(
CachingEth1Backend::new(eth1_config, log, MainnetEthSpec::default_spec()).unwrap(),
CachingEth1Backend::new(eth1_config, MainnetEthSpec::default_spec()).unwrap(),
)
}

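For trait methods such as `Eth1ChainBackend::from_bytes`, dropping the logger changes the trait signature itself, so both implementors (`DummyEth1ChainBackend` and `CachingEth1Backend`) shrink in lockstep. A condensed sketch of that signature change, using stand-in unit structs rather than the real `Eth1Config`/`ChainSpec` types:

```rust
// Condensed sketch: `Eth1Config` and `ChainSpec` are stand-in unit structs,
// not the real Lighthouse types.
struct Eth1Config;
struct ChainSpec;

trait Eth1ChainBackend: Sized {
    // Before:
    //   fn from_bytes(bytes: &[u8], config: Eth1Config, log: Logger, spec: ChainSpec)
    //       -> Result<Self, String>;
    // After: no logger argument; implementations that need to log do so via
    // the globally installed subscriber.
    fn from_bytes(bytes: &[u8], config: Eth1Config, spec: ChainSpec) -> Result<Self, String>;
}

struct DummyBackend;

impl Eth1ChainBackend for DummyBackend {
    fn from_bytes(_bytes: &[u8], _config: Eth1Config, _spec: ChainSpec) -> Result<Self, String> {
        Ok(DummyBackend)
    }
}

fn main() {
    let backend = DummyBackend::from_bytes(&[], Eth1Config, ChainSpec);
    assert!(backend.is_ok());
}
```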
1 change: 0 additions & 1 deletion beacon_node/beacon_chain/src/fork_revert.rs
@@ -1,7 +1,6 @@
use crate::{BeaconForkChoiceStore, BeaconSnapshot};
use fork_choice::{ForkChoice, PayloadVerificationStatus};
use itertools::process_results;
use slog::Logger;
use state_processing::state_advance::complete_state_advance;
use state_processing::{
per_block_processing, per_block_processing::BlockSignatureStrategy, ConsensusContext,
15 changes: 3 additions & 12 deletions beacon_node/beacon_chain/src/graffiti_calculator.rs
@@ -3,7 +3,7 @@ use crate::BeaconChainTypes;
use execution_layer::{http::ENGINE_GET_CLIENT_VERSION_V1, CommitPrefix, ExecutionLayer};
use logging::crit;
use serde::{Deserialize, Serialize};
use slog::Logger;

use slot_clock::SlotClock;
use std::{fmt::Debug, time::Duration};
use task_executor::TaskExecutor;
@@ -53,21 +53,18 @@ pub struct GraffitiCalculator<T: BeaconChainTypes> {
pub beacon_graffiti: GraffitiOrigin,
execution_layer: Option<ExecutionLayer<T::EthSpec>>,
pub epoch_duration: Duration,
log: Logger,
}

impl<T: BeaconChainTypes> GraffitiCalculator<T> {
pub fn new(
beacon_graffiti: GraffitiOrigin,
execution_layer: Option<ExecutionLayer<T::EthSpec>>,
epoch_duration: Duration,
log: Logger,
) -> Self {
Self {
beacon_graffiti,
execution_layer,
epoch_duration,
log,
}
}

@@ -163,13 +160,8 @@ pub fn start_engine_version_cache_refresh_service<T: BeaconChainTypes>(
let epoch_duration = chain.graffiti_calculator.epoch_duration;
executor.spawn(
async move {
engine_version_cache_refresh_service::<T>(
execution_layer,
slot_clock,
epoch_duration,
log,
)
.await
engine_version_cache_refresh_service::<T>(execution_layer, slot_clock, epoch_duration)
.await
},
"engine_version_cache_refresh_service",
);
@@ -179,7 +171,6 @@ async fn engine_version_cache_refresh_service<T: BeaconChainTypes>(
execution_layer: ExecutionLayer<T::EthSpec>,
slot_clock: T::SlotClock,
epoch_duration: Duration,
log: Logger,
) {
// Preload the engine version cache after a brief delay to allow for EL initialization.
// This initial priming ensures cache readiness before the service's regular update cycle begins.
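In `graffiti_calculator.rs`, the spawned refresh task no longer has to move a `Logger` clone into its `async move` block; the future captures only the data it computes with. A sketch of that shape, assuming a tokio runtime and an illustrative loop body (the real service drives this through Lighthouse's `TaskExecutor`):

```rust
// Sketch only: assumes the tokio and tracing crates; the refresh interval
// and loop body are illustrative, not the real service logic.
use std::time::Duration;
use tracing::debug;

async fn engine_version_cache_refresh(epoch_duration: Duration) {
    loop {
        tokio::time::sleep(epoch_duration).await;
        // No `Logger` was moved into this future; the macro resolves the
        // globally installed subscriber at the call site.
        debug!("refreshing engine version cache");
    }
}

#[tokio::main]
async fn main() {
    // 32 slots x 12 s = one mainnet epoch.
    let handle = tokio::spawn(engine_version_cache_refresh(Duration::from_secs(32 * 12)));
    // Let the sketch run briefly before exiting.
    let _ = tokio::time::timeout(Duration::from_millis(10), handle).await;
}
```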
39 changes: 11 additions & 28 deletions beacon_node/beacon_chain/src/migrate.rs
@@ -3,7 +3,6 @@ use crate::errors::BeaconChainError;
use crate::head_tracker::{HeadTracker, SszHeadTracker};
use crate::persisted_beacon_chain::{PersistedBeaconChain, DUMMY_CANONICAL_HEAD_BLOCK_ROOT};
use parking_lot::Mutex;
use slog::Logger;
use std::collections::{HashMap, HashSet};
use std::mem;
use std::sync::{mpsc, Arc};
@@ -39,7 +38,6 @@ pub struct BackgroundMigrator<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>
tx_thread: Option<Mutex<(mpsc::Sender<Notification>, thread::JoinHandle<()>)>>,
/// Genesis block root, for persisting the `PersistedBeaconChain`.
genesis_block_root: Hash256,
log: Logger,
}

#[derive(Debug, Clone, PartialEq, Eq)]
@@ -135,7 +133,6 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
db: Arc<HotColdDB<E, Hot, Cold>>,
config: MigratorConfig,
genesis_block_root: Hash256,
log: Logger,
) -> Self {
// Estimate last migration run from DB split slot.
let prev_migration = Arc::new(Mutex::new(PrevMigration {
@@ -145,14 +142,13 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
let tx_thread = if config.blocking {
None
} else {
Some(Mutex::new(Self::spawn_thread(db.clone(), log.clone())))
Some(Mutex::new(Self::spawn_thread(db.clone())))
};
Self {
db,
tx_thread,
prev_migration,
genesis_block_root,
log,
}
}

@@ -179,7 +175,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
if let Some(Notification::Finalization(notif)) =
self.send_background_notification(Notification::Finalization(notif))
{
Self::run_migration(self.db.clone(), notif, &self.log);
Self::run_migration(self.db.clone(), notif);
}

Ok(())
@@ -189,19 +185,19 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
if let Some(Notification::Reconstruction) =
self.send_background_notification(Notification::Reconstruction)
{
Self::run_reconstruction(self.db.clone(), &self.log);
Self::run_reconstruction(self.db.clone());
}
}

pub fn process_prune_blobs(&self, data_availability_boundary: Epoch) {
if let Some(Notification::PruneBlobs(data_availability_boundary)) =
self.send_background_notification(Notification::PruneBlobs(data_availability_boundary))
{
Self::run_prune_blobs(self.db.clone(), data_availability_boundary, &self.log);
Self::run_prune_blobs(self.db.clone(), data_availability_boundary);
}
}

pub fn run_reconstruction(db: Arc<HotColdDB<E, Hot, Cold>>, log: &Logger) {
pub fn run_reconstruction(db: Arc<HotColdDB<E, Hot, Cold>>) {
if let Err(e) = db.reconstruct_historic_states() {
error!(
error = ?e,
@@ -210,11 +206,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
}
}

pub fn run_prune_blobs(
db: Arc<HotColdDB<E, Hot, Cold>>,
data_availability_boundary: Epoch,
log: &Logger,
) {
pub fn run_prune_blobs(db: Arc<HotColdDB<E, Hot, Cold>>, data_availability_boundary: Epoch) {
if let Err(e) = db.try_prune_blobs(false, data_availability_boundary) {
error!(
error = ?e,
@@ -234,7 +226,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho

// Restart the background thread if it has crashed.
if let Err(tx_err) = tx.send(notif) {
let (new_tx, new_thread) = Self::spawn_thread(self.db.clone(), self.log.clone());
let (new_tx, new_thread) = Self::spawn_thread(self.db.clone());

*tx = new_tx;
let old_thread = mem::replace(thread, new_thread);
@@ -259,11 +251,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
}

/// Perform the actual work of `process_finalization`.
fn run_migration(
db: Arc<HotColdDB<E, Hot, Cold>>,
notif: FinalizationNotification,
log: &Logger,
) {
fn run_migration(db: Arc<HotColdDB<E, Hot, Cold>>, notif: FinalizationNotification) {
// Do not run too frequently.
let epoch = notif.finalized_checkpoint.epoch;
let mut prev_migration = notif.prev_migration.lock();
@@ -307,7 +295,6 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
&finalized_state,
notif.finalized_checkpoint,
notif.genesis_block_root,
log,
) {
Ok(PruningOutcome::Successful {
old_finalized_checkpoint,
@@ -361,7 +348,6 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
db,
old_finalized_checkpoint.epoch,
notif.finalized_checkpoint.epoch,
log,
) {
warn!(error = ?e, "Database compaction failed");
}
@@ -374,7 +360,6 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
/// Return a channel handle for sending requests to the thread.
fn spawn_thread(
db: Arc<HotColdDB<E, Hot, Cold>>,
log: Logger,
) -> (mpsc::Sender<Notification>, thread::JoinHandle<()>) {
let (tx, rx) = mpsc::channel();
let thread = thread::spawn(move || {
@@ -409,13 +394,13 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
}
// If reconstruction is on-going, ignore finalization migration and blob pruning.
if reconstruction_notif.is_some() {
Self::run_reconstruction(db.clone(), &log);
Self::run_reconstruction(db.clone());
} else {
if let Some(fin) = finalization_notif {
Self::run_migration(db.clone(), fin, &log);
Self::run_migration(db.clone(), fin);
}
if let Some(dab) = prune_blobs_notif {
Self::run_prune_blobs(db.clone(), dab, &log);
Self::run_prune_blobs(db.clone(), dab);
}
}
}
@@ -434,7 +419,6 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
new_finalized_state: &BeaconState<E>,
new_finalized_checkpoint: Checkpoint,
genesis_block_root: Hash256,
log: &Logger,
) -> Result<PruningOutcome, BeaconChainError> {
let old_finalized_checkpoint =
store
@@ -706,7 +690,6 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
db: Arc<HotColdDB<E, Hot, Cold>>,
old_finalized_epoch: Epoch,
new_finalized_epoch: Epoch,
log: &Logger,
) -> Result<(), Error> {
if !db.compact_on_prune() {
return Ok(());
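The bulk of the commit is in `migrate.rs`, where `spawn_thread` and the `run_migration`/`run_reconstruction`/`run_prune_blobs` helpers all lose a logger parameter while keeping the channel-plus-worker-thread shape. A compact sketch of that pattern under the same assumptions as above (`Db` stands in for `HotColdDB`, and `String` for the real error type):

```rust
// Sketch only: `Db` and the error type are simplified stand-ins; tracing is
// assumed for the structured logging macros.
use std::sync::{mpsc, Arc};
use std::thread;
use tracing::error;

struct Db;

enum Notification {
    Reconstruction,
}

// Before: fn spawn_thread(db: Arc<Db>, log: Logger) -> ...
fn spawn_thread(db: Arc<Db>) -> (mpsc::Sender<Notification>, thread::JoinHandle<()>) {
    let (tx, rx) = mpsc::channel();
    let thread = thread::spawn(move || {
        // The loop ends once every sender has been dropped.
        while let Ok(notif) = rx.recv() {
            match notif {
                Notification::Reconstruction => {
                    if let Err(e) = reconstruct(&db) {
                        // Structured field on the global subscriber; no
                        // `&Logger` threads down to this call.
                        error!(error = ?e, "State reconstruction failed");
                    }
                }
            }
        }
    });
    (tx, thread)
}

fn reconstruct(_db: &Db) -> Result<(), String> {
    Ok(())
}

fn main() {
    let (tx, handle) = spawn_thread(Arc::new(Db));
    tx.send(Notification::Reconstruction).expect("worker is alive");
    drop(tx); // close the channel so the worker thread exits
    handle.join().expect("worker exits cleanly");
}
```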
[Diffs for the remaining 29 changed files are not shown.]
