This repository has been archived by the owner on Nov 15, 2023. It is now read-only.

Commit 349af95

Revert "state-db: Print warning when using large pruning window on RocksDb (#13414)"

This reverts commit a118bf3.
Ross Bulat committed Feb 19, 2023
1 parent a118bf3 commit 349af95
Showing 3 changed files with 18 additions and 71 deletions.
4 changes: 1 addition & 3 deletions client/state-db/src/lib.rs
@@ -56,8 +56,6 @@ use std::{
 	fmt,
 };
 
-const LOG_TARGET: &str = "state-db";
-const LOG_TARGET_PIN: &str = "state-db::pin";
 const PRUNING_MODE: &[u8] = b"mode";
 const PRUNING_MODE_ARCHIVE: &[u8] = b"archive";
 const PRUNING_MODE_ARCHIVE_CANON: &[u8] = b"archive_canonical";
@@ -311,7 +309,7 @@ impl<BlockHash: Hash, Key: Hash, D: MetaDb> StateDbSync<BlockHash, Key, D> {
 		ref_counting: bool,
 		db: D,
 	) -> Result<StateDbSync<BlockHash, Key, D>, Error<D::Error>> {
-		trace!(target: LOG_TARGET, "StateDb settings: {:?}. Ref-counting: {}", mode, ref_counting);
+		trace!(target: "state-db", "StateDb settings: {:?}. Ref-counting: {}", mode, ref_counting);
 
 		let non_canonical: NonCanonicalOverlay<BlockHash, Key> = NonCanonicalOverlay::new(&db)?;
 		let pruning: Option<RefWindow<BlockHash, Key, D>> = match mode {
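Note: all three files make the same mechanical change — call sites go back from the shared `LOG_TARGET`/`LOG_TARGET_PIN` constants to inline target strings. A minimal sketch (not from the commit) contrasting the two styles; it assumes the `log` crate, with `env_logger` as an arbitrary backend choice:

use log::trace;

const LOG_TARGET: &str = "state-db";

// Style removed by this revert: one shared constant, so the target
// string cannot drift between call sites.
fn log_with_constant(mode: &str) {
	trace!(target: LOG_TARGET, "StateDb settings: {:?}", mode);
}

// Style restored by this revert: the literal is repeated at each call site.
fn log_with_literal(mode: &str) {
	trace!(target: "state-db", "StateDb settings: {:?}", mode);
}

fn main() {
	env_logger::init(); // assumed backend, not part of state-db
	log_with_constant("Constrained(256)");
	log_with_literal("Constrained(256)");
}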
47 changes: 11 additions & 36 deletions client/state-db/src/noncanonical.rs
@@ -20,8 +20,6 @@
 //! Maintains trees of block overlays and allows discarding trees/roots
 //! The overlays are added in `insert` and removed in `canonicalize`.
-use crate::{LOG_TARGET, LOG_TARGET_PIN};
-
 use super::{to_meta_key, ChangeSet, CommitSet, DBValue, Error, Hash, MetaDb, StateDbError};
 use codec::{Decode, Encode};
 use log::trace;
@@ -180,12 +178,7 @@ impl<BlockHash: Hash, Key: Hash> NonCanonicalOverlay<BlockHash, Key> {
 		let mut values = HashMap::new();
 		if let Some((ref hash, mut block)) = last_canonicalized {
 			// read the journal
-			trace!(
-				target: LOG_TARGET,
-				"Reading uncanonicalized journal. Last canonicalized #{} ({:?})",
-				block,
-				hash
-			);
+			trace!(target: "state-db", "Reading uncanonicalized journal. Last canonicalized #{} ({:?})", block, hash);
 			let mut total: u64 = 0;
 			block += 1;
 			loop {
@@ -205,7 +198,7 @@ impl<BlockHash: Hash, Key: Hash> NonCanonicalOverlay<BlockHash, Key> {
 				};
 				insert_values(&mut values, record.inserted);
 				trace!(
-					target: LOG_TARGET,
+					target: "state-db",
 					"Uncanonicalized journal entry {}.{} ({:?}) ({} inserted, {} deleted)",
 					block,
 					index,
@@ -224,11 +217,7 @@ impl<BlockHash: Hash, Key: Hash> NonCanonicalOverlay<BlockHash, Key> {
 				levels.push_back(level);
 				block += 1;
 			}
-			trace!(
-				target: LOG_TARGET,
-				"Finished reading uncanonicalized journal, {} entries",
-				total
-			);
+			trace!(target: "state-db", "Finished reading uncanonicalized journal, {} entries", total);
 		}
 		Ok(NonCanonicalOverlay {
 			last_canonicalized,
@@ -263,9 +252,7 @@ impl<BlockHash: Hash, Key: Hash> NonCanonicalOverlay<BlockHash, Key> {
 		} else if self.last_canonicalized.is_some() {
 			if number < front_block_number || number > front_block_number + self.levels.len() as u64
 			{
-				trace!(
-					target: LOG_TARGET,
-					"Failed to insert block {}, current is {} .. {})",
+				trace!(target: "state-db", "Failed to insert block {}, current is {} .. {})",
 					number,
 					front_block_number,
 					front_block_number + self.levels.len() as u64,
@@ -297,7 +284,7 @@ impl<BlockHash: Hash, Key: Hash> NonCanonicalOverlay<BlockHash, Key> {
 
 		if level.blocks.len() >= MAX_BLOCKS_PER_LEVEL as usize {
 			trace!(
-				target: LOG_TARGET,
+				target: "state-db",
 				"Too many sibling blocks at #{number}: {:?}",
 				level.blocks.iter().map(|b| &b.hash).collect::<Vec<_>>()
 			);
@@ -327,15 +314,7 @@ impl<BlockHash: Hash, Key: Hash> NonCanonicalOverlay<BlockHash, Key> {
 			deleted: changeset.deleted,
 		};
 		commit.meta.inserted.push((journal_key, journal_record.encode()));
-		trace!(
-			target: LOG_TARGET,
-			"Inserted uncanonicalized changeset {}.{} {:?} ({} inserted, {} deleted)",
-			number,
-			index,
-			hash,
-			journal_record.inserted.len(),
-			journal_record.deleted.len()
-		);
+		trace!(target: "state-db", "Inserted uncanonicalized changeset {}.{} {:?} ({} inserted, {} deleted)", number, index, hash, journal_record.inserted.len(), journal_record.deleted.len());
 		insert_values(&mut self.values, journal_record.inserted);
 		Ok(commit)
 	}
@@ -389,7 +368,7 @@ impl<BlockHash: Hash, Key: Hash> NonCanonicalOverlay<BlockHash, Key> {
 		hash: &BlockHash,
 		commit: &mut CommitSet<Key>,
 	) -> Result<u64, StateDbError> {
-		trace!(target: LOG_TARGET, "Canonicalizing {:?}", hash);
+		trace!(target: "state-db", "Canonicalizing {:?}", hash);
 		let level = match self.levels.pop_front() {
 			Some(level) => level,
 			None => return Err(StateDbError::InvalidBlock),
@@ -453,7 +432,7 @@ impl<BlockHash: Hash, Key: Hash> NonCanonicalOverlay<BlockHash, Key> {
 			.meta
 			.inserted
 			.push((to_meta_key(LAST_CANONICAL, &()), canonicalized.encode()));
-		trace!(target: LOG_TARGET, "Discarding {} records", commit.meta.deleted.len());
+		trace!(target: "state-db", "Discarding {} records", commit.meta.deleted.len());
 
 		let num = canonicalized.1;
 		self.last_canonicalized = Some(canonicalized);
@@ -500,7 +479,7 @@ impl<BlockHash: Hash, Key: Hash> NonCanonicalOverlay<BlockHash, Key> {
 		};
 		// Check that it does not have any children
 		if (level_index != level_count - 1) && self.parents.values().any(|h| h == hash) {
-			log::debug!(target: LOG_TARGET, "Trying to remove block {:?} with children", hash);
+			log::debug!(target: "state-db", "Trying to remove block {:?} with children", hash);
 			return None
 		}
 		let overlay = level.remove(index);
@@ -523,7 +502,7 @@ impl<BlockHash: Hash, Key: Hash> NonCanonicalOverlay<BlockHash, Key> {
 	pub fn pin(&mut self, hash: &BlockHash) {
 		let refs = self.pinned.entry(hash.clone()).or_default();
 		if *refs == 0 {
-			trace!(target: LOG_TARGET_PIN, "Pinned non-canon block: {:?}", hash);
+			trace!(target: "state-db-pin", "Pinned non-canon block: {:?}", hash);
 		}
 		*refs += 1;
 	}
@@ -552,11 +531,7 @@ impl<BlockHash: Hash, Key: Hash> NonCanonicalOverlay<BlockHash, Key> {
 				entry.get_mut().1 -= 1;
 				if entry.get().1 == 0 {
 					let (inserted, _) = entry.remove();
-					trace!(
-						target: LOG_TARGET_PIN,
-						"Discarding unpinned non-canon block: {:?}",
-						hash
-					);
+					trace!(target: "state-db-pin", "Discarding unpinned non-canon block: {:?}", hash);
 					discard_values(&mut self.values, inserted);
 					self.parents.remove(&hash);
 				}
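Note: the `pin`/`unpin` pair touched in the last two hunks is plain reference counting keyed by block hash. A simplified standalone sketch of the same pattern (hypothetical `PinSet` type, not this crate's actual structure, which also tracks inserted values):

use std::collections::HashMap;
use std::fmt::Debug;
use std::hash::Hash;

struct PinSet<H: Hash + Eq + Clone + Debug> {
	pinned: HashMap<H, u32>,
}

impl<H: Hash + Eq + Clone + Debug> PinSet<H> {
	fn pin(&mut self, hash: &H) {
		// First pin inserts a zero count, so the trace fires exactly once.
		let refs = self.pinned.entry(hash.clone()).or_default();
		if *refs == 0 {
			log::trace!(target: "state-db-pin", "Pinned block: {:?}", hash);
		}
		*refs += 1;
	}

	fn unpin(&mut self, hash: &H) {
		if let Some(refs) = self.pinned.get_mut(hash) {
			*refs -= 1;
			if *refs == 0 {
				// Last reference released: drop the entry entirely.
				self.pinned.remove(hash);
				log::trace!(target: "state-db-pin", "Discarding unpinned block: {:?}", hash);
			}
		}
	}
}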
38 changes: 6 additions & 32 deletions client/state-db/src/pruning.rs
@@ -26,7 +26,7 @@
 use crate::{
 	noncanonical::LAST_CANONICAL, to_meta_key, CommitSet, Error, Hash, MetaDb, StateDbError,
-	DEFAULT_MAX_BLOCK_CONSTRAINT, LOG_TARGET,
+	DEFAULT_MAX_BLOCK_CONSTRAINT,
 };
 use codec::{Decode, Encode};
 use log::trace;
@@ -79,24 +79,14 @@ impl<BlockHash: Hash, Key: Hash, D: MetaDb> DeathRowQueue<BlockHash, Key, D> {
 			death_index: HashMap::new(),
 		};
 		// read the journal
-		trace!(
-			target: LOG_TARGET,
-			"Reading pruning journal for the memory queue. Pending #{}",
-			base,
-		);
+		trace!(target: "state-db", "Reading pruning journal for the memory queue. Pending #{}", base);
 		loop {
 			let journal_key = to_journal_key(block);
 			match db.get_meta(&journal_key).map_err(Error::Db)? {
 				Some(record) => {
 					let record: JournalRecord<BlockHash, Key> =
 						Decode::decode(&mut record.as_slice())?;
-					trace!(
-						target: LOG_TARGET,
-						"Pruning journal entry {} ({} inserted, {} deleted)",
-						block,
-						record.inserted.len(),
-						record.deleted.len(),
-					);
+					trace!(target: "state-db", "Pruning journal entry {} ({} inserted, {} deleted)", block, record.inserted.len(), record.deleted.len());
 					queue.import(base, block, record);
 				},
 				None => break,
@@ -117,25 +107,21 @@ impl<BlockHash: Hash, Key: Hash, D: MetaDb> DeathRowQueue<BlockHash, Key, D> {
 		// limit the cache capacity from 1 to `DEFAULT_MAX_BLOCK_CONSTRAINT`
 		let cache_capacity = window_size.clamp(1, DEFAULT_MAX_BLOCK_CONSTRAINT) as usize;
 		let mut cache = VecDeque::with_capacity(cache_capacity);
-		trace!(
-			target: LOG_TARGET,
-			"Reading pruning journal for the database-backed queue. Pending #{}",
-			base
-		);
+		trace!(target: "state-db", "Reading pruning journal for the database-backed queue. Pending #{}", base);
 		DeathRowQueue::load_batch_from_db(&db, &mut cache, base, cache_capacity)?;
 		Ok(DeathRowQueue::DbBacked { db, cache, cache_capacity, last })
 	}
 
 	/// import a new block to the back of the queue
 	fn import(&mut self, base: u64, num: u64, journal_record: JournalRecord<BlockHash, Key>) {
 		let JournalRecord { hash, inserted, deleted } = journal_record;
-		trace!(target: LOG_TARGET, "Importing {}, base={}", num, base);
+		trace!(target: "state-db", "Importing {}, base={}", num, base);
 		match self {
 			DeathRowQueue::DbBacked { cache, cache_capacity, last, .. } => {
 				// If the new block continues cached range and there is space, load it directly into
 				// cache.
 				if num == base + cache.len() as u64 && cache.len() < *cache_capacity {
-					trace!(target: LOG_TARGET, "Adding to DB backed cache {:?} (#{})", hash, num);
+					trace!(target: "state-db", "Adding to DB backed cache {:?} (#{})", hash, num);
 					cache.push_back(DeathRow { hash, deleted: deleted.into_iter().collect() });
 				}
 				*last = Some(num);
@@ -320,18 +306,6 @@ impl<BlockHash: Hash, Key: Hash, D: MetaDb> RefWindow<BlockHash, Key, D> {
 		};
 
 		let queue = if count_insertions {
-			// Highly scientific crafted number for deciding when to print the warning!
-			//
-			// Rocksdb doesn't support refcounting and requires that we load the entire pruning
-			// window into the memory.
-			if window_size > 1000 {
-				log::warn!(
-					target: LOG_TARGET,
-					"Large pruning window of {window_size} detected! THIS CAN LEAD TO HIGH MEMORY USAGE AND CRASHES. \
-					Reduce the pruning window or switch your database to paritydb."
-				);
-			}
-
 			DeathRowQueue::new_mem(&db, base)?
 		} else {
 			let last = match last_canonicalized_number {
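Note: the block removed in this last hunk is the warning that #13414 introduced and this commit reverts. On the `count_insertions` path (RocksDB, which has no reference counting) the memory-backed queue keeps the entire pruning window resident, so a large window risks exhausting RAM. A standalone sketch of the reverted guard — the 1000-block threshold and message are copied from the diff above; the wrapper function itself is illustrative, not part of the crate:

// Warn when a memory-backed pruning window is large enough to threaten
// memory exhaustion. Threshold and message taken verbatim from the
// reverted change; the inline "state-db" target replaces LOG_TARGET.
fn warn_on_large_window(window_size: u64) {
	if window_size > 1000 {
		log::warn!(
			target: "state-db",
			"Large pruning window of {window_size} detected! THIS CAN LEAD TO HIGH MEMORY USAGE AND CRASHES. \
			Reduce the pruning window or switch your database to paritydb."
		);
	}
}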
