Backfill blob storage fix #5119

Merged 5 commits on Jan 23, 2024
6 changes: 4 additions & 2 deletions beacon_node/beacon_chain/src/historical_blocks.rs
@@ -101,8 +101,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
ChunkWriter::<BlockRoots, _, _>::new(&self.store.cold_db, prev_block_slot.as_usize())?;
let mut new_oldest_blob_slot = blob_info.oldest_blob_slot;

+ let mut blob_batch = Vec::with_capacity(n_blobs_lists_to_import);
let mut cold_batch = Vec::with_capacity(blocks_to_import.len());
- let mut hot_batch = Vec::with_capacity(blocks_to_import.len() + n_blobs_lists_to_import);
+ let mut hot_batch = Vec::with_capacity(blocks_to_import.len());
let mut signed_blocks = Vec::with_capacity(blocks_to_import.len());

for available_block in blocks_to_import.into_iter().rev() {
@@ -124,7 +125,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
if let Some(blobs) = maybe_blobs {
new_oldest_blob_slot = Some(block.slot());
self.store
- .blobs_as_kv_store_ops(&block_root, blobs, &mut hot_batch);
+ .blobs_as_kv_store_ops(&block_root, blobs, &mut blob_batch);
}

// Store block roots, including at all skip slots in the freezer DB.
@@ -199,6 +200,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
// Write the I/O batches to disk, writing the blocks themselves first, as it's better
// for the hot DB to contain extra blocks than for the cold DB to point to blocks that
// do not exist.
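// [Editor's note, not part of the diff] Blobs are flushed before blocks on the same principle:
// a stray blob without a block is harmless, whereas a block whose blobs are missing could not be served.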
+ self.store.blobs_db.do_atomically(blob_batch)?;
self.store.hot_db.do_atomically(hot_batch)?;
self.store.cold_db.do_atomically(cold_batch)?;

9 changes: 9 additions & 0 deletions beacon_node/beacon_chain/src/schema_change.rs
@@ -1,6 +1,7 @@
//! Utilities for managing database schema changes.
mod migration_schema_v17;
mod migration_schema_v18;
+ mod migration_schema_v19;

use crate::beacon_chain::BeaconChainTypes;
use crate::types::ChainSpec;
@@ -69,6 +70,14 @@ pub fn migrate_schema<T: BeaconChainTypes>(
let ops = migration_schema_v18::downgrade_from_v18::<T>(db.clone(), log)?;
db.store_schema_version_atomically(to, ops)
}
+ (SchemaVersion(18), SchemaVersion(19)) => {
+ let ops = migration_schema_v19::upgrade_to_v19::<T>(db.clone(), log)?;
+ db.store_schema_version_atomically(to, ops)
+ }
+ (SchemaVersion(19), SchemaVersion(18)) => {
Review comment (Member):

> Why do we need the downgrade if it's a no-op?

Reply (Member, Author):

> I was thinking so we could support 19 -> 17, for example.

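To make the reply concrete: keeping the no-op 19 → 18 arm means every adjacent pair of versions stays connected, so a longer downgrade such as v19 → v17 can be composed from single-version hops. A minimal sketch of that chaining idea, using simplified stand-in types rather than Lighthouse's actual signatures:

```rust
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
struct SchemaVersion(u64);

// One single-version downgrade step; v19 -> v18 is a no-op but must exist.
fn downgrade_one_step(from: SchemaVersion) -> Result<(), String> {
    match from.0 {
        19 => Ok(()), // no-op, but keeps the downgrade chain connected
        18 => Ok(()), // the real v18 -> v17 migration would run here
        v => Err(format!("unsupported downgrade from v{v}")),
    }
}

// Compose a multi-version downgrade from single steps, one version at a time.
fn downgrade(mut from: SchemaVersion, to: SchemaVersion) -> Result<(), String> {
    while from > to {
        downgrade_one_step(from)?;
        from = SchemaVersion(from.0 - 1);
    }
    Ok(())
}

fn main() {
    // Without the no-op 19 -> 18 arm, v19 -> v17 would be unreachable.
    downgrade(SchemaVersion(19), SchemaVersion(17)).unwrap();
    println!("downgraded to v17");
}
```

With only the explicit pairs in the match above, removing the no-op arm would send 19 → 18 into the `UnsupportedSchemaVersion` error case and make v17 unreachable from v19.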
+ let ops = migration_schema_v19::downgrade_from_v19::<T>(db.clone(), log)?;
+ db.store_schema_version_atomically(to, ops)
+ }
// Anything else is an error.
(_, _) => Err(HotColdDBError::UnsupportedSchemaVersion {
target_version: to,
55 changes: 55 additions & 0 deletions beacon_node/beacon_chain/src/schema_change/migration_schema_v19.rs
@@ -0,0 +1,55 @@
use crate::beacon_chain::BeaconChainTypes;
use slog::{debug, info, Logger};
use std::sync::Arc;
use store::{get_key_for_col, DBColumn, Error, HotColdDB, KeyValueStore, KeyValueStoreOp};

pub fn upgrade_to_v19<T: BeaconChainTypes>(
db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
log: Logger,
) -> Result<Vec<KeyValueStoreOp>, Error> {
let mut hot_delete_ops = vec![];
let mut blob_keys = vec![];
let column = DBColumn::BeaconBlob;

debug!(log, "Migrating from v18 to v19");
// Iterate through the blobs on disk.
for res in db.hot_db.iter_column_keys::<Vec<u8>>(column) {
let key = res?;
let key_col = get_key_for_col(column.as_str(), &key);
hot_delete_ops.push(KeyValueStoreOp::DeleteKey(key_col));
blob_keys.push(key);
}
let num_blobs = blob_keys.len();
debug!(log, "Collected {} blob lists to migrate", num_blobs);

for key in blob_keys {
let next_blob = db.hot_db.get_bytes(column.as_str(), &key)?;
if let Some(next_blob) = next_blob {
let key_col = get_key_for_col(column.as_str(), &key);
db.blobs_db
.do_atomically(vec![KeyValueStoreOp::PutKeyValue(key_col, next_blob)])?;
}
}
debug!(log, "Wrote {} blobs to the blobs db", num_blobs);

// Return the hot DB deletions; they are applied atomically with the schema version update.
info!(
log,
"Upgrading to v19 schema";
"info" => "ready for Deneb once it is scheduled"
);
Ok(hot_delete_ops)
}

pub fn downgrade_from_v19<T: BeaconChainTypes>(
_db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
log: Logger,
) -> Result<Vec<KeyValueStoreOp>, Error> {
// No-op
info!(
log,
"Downgrading to v18 schema";
);

Ok(vec![])
}
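One detail worth noting about the shape of this migration: copying blobs into the blobs DB happens immediately and is idempotent, while the hot-DB deletions are only *returned*, so the caller (`migrate_schema` above) can apply them in the same atomic batch as the schema-version write. A self-contained sketch of that pattern with toy in-memory types (hypothetical, simplified — not the real store API):

```rust
use std::collections::BTreeMap;

#[derive(Default)]
struct Db {
    hot: BTreeMap<Vec<u8>, Vec<u8>>,
    blobs: BTreeMap<Vec<u8>, Vec<u8>>,
    version: u64,
}

enum Op {
    DeleteHot(Vec<u8>),
    SetVersion(u64),
}

impl Db {
    // Stand-in for `store_schema_version_atomically`: apply all ops as one batch.
    fn apply_atomically(&mut self, ops: Vec<Op>) {
        for op in ops {
            match op {
                Op::DeleteHot(k) => {
                    self.hot.remove(&k);
                }
                Op::SetVersion(v) => self.version = v,
            }
        }
    }
}

// Copy blobs to the blobs DB (idempotent), and return the deferred deletions.
fn upgrade_to_v19(db: &mut Db) -> Vec<Op> {
    let mut delete_ops = Vec::new();
    for (key, blob) in db.hot.clone() {
        db.blobs.insert(key.clone(), blob); // re-writing on a rerun is harmless
        delete_ops.push(Op::DeleteHot(key));
    }
    delete_ops
}

fn main() {
    let mut db = Db { version: 18, ..Default::default() };
    db.hot.insert(b"blob-key".to_vec(), b"blob-bytes".to_vec());

    // Deletions and the version bump land in a single atomic batch.
    let mut ops = upgrade_to_v19(&mut db);
    ops.push(Op::SetVersion(19));
    db.apply_atomically(ops);

    assert!(db.hot.is_empty());
    assert_eq!(db.version, 19);
    assert_eq!(db.blobs.len(), 1);
}
```

A crash after the copy but before the atomic batch leaves the node at v18 with its hot-DB blobs intact, so the migration can simply be rerun.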
2 changes: 1 addition & 1 deletion beacon_node/store/src/metadata.rs
@@ -4,7 +4,7 @@ use ssz::{Decode, Encode};
use ssz_derive::{Decode, Encode};
use types::{Checkpoint, Hash256, Slot};

- pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(18);
+ pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(19);

// All the keys that get stored under the `BeaconMeta` column.
//
6 changes: 4 additions & 2 deletions book/src/database-migrations.md
@@ -16,7 +16,8 @@ validator client or the slasher**.

| Lighthouse version | Release date | Schema version | Downgrade available? |
|--------------------|--------------|----------------|----------------------|
- | v4.6.0 | Dec 2023 | v18 | yes before Deneb |
+ | v4.6.0 | Dec 2023 | v19 | yes before Deneb |
+ | v4.6.0-rc.0 | Dec 2023 | v18 | yes before Deneb |
| v4.5.0 | Sep 2023 | v17 | yes |
| v4.4.0 | Aug 2023 | v17 | yes |
| v4.3.0 | Jul 2023 | v17 | yes |
@@ -192,7 +193,8 @@ Here are the steps to prune historic states:

| Lighthouse version | Release date | Schema version | Downgrade available? |
|--------------------|--------------|----------------|-------------------------------------|
- | v4.6.0 | Dec 2023 | v18 | yes before Deneb |
+ | v4.6.0 | Dec 2023 | v19 | yes before Deneb |
+ | v4.6.0-rc.0 | Dec 2023 | v18 | yes before Deneb |
| v4.5.0 | Sep 2023 | v17 | yes |
| v4.4.0 | Aug 2023 | v17 | yes |
| v4.3.0 | Jul 2023 | v17 | yes |