diff --git a/Cargo.lock b/Cargo.lock
index a613b7ba03686..a89ed4538ff9d 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4336,6 +4336,7 @@ dependencies = [
  "sc-chain-spec",
  "sc-cli",
  "sc-client-api",
+ "sc-client-db",
  "sc-consensus",
  "sc-consensus-babe",
  "sc-consensus-epochs",
diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml
index 75ac03266cff9..acbf1b9888b86 100644
--- a/bin/node/cli/Cargo.toml
+++ b/bin/node/cli/Cargo.toml
@@ -109,6 +109,7 @@ sp-trie = { version = "4.0.0-dev", default-features = false, path = "../../../pr
 
 [dev-dependencies]
 sc-keystore = { version = "4.0.0-dev", path = "../../../client/keystore" }
+sc-client-db = { version = "0.10.0-dev", path = "../../../client/db" }
 sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" }
 sc-consensus-babe = { version = "0.10.0-dev", path = "../../../client/consensus/babe" }
 sc-consensus-epochs = { version = "0.10.0-dev", path = "../../../client/consensus/epochs" }
diff --git a/bin/node/cli/tests/check_block_works.rs b/bin/node/cli/tests/check_block_works.rs
index 39963fb002876..707fd217e33e8 100644
--- a/bin/node/cli/tests/check_block_works.rs
+++ b/bin/node/cli/tests/check_block_works.rs
@@ -28,7 +28,7 @@ pub mod common;
 fn check_block_works() {
 	let base_path = tempdir().expect("could not create a temp dir");
 
-	common::run_dev_node_for_a_while(base_path.path());
+	common::run_node_for_a_while(base_path.path(), &["--dev"]);
 
 	let status = Command::new(cargo_bin("substrate"))
 		.args(&["check-block", "--dev", "--pruning", "archive", "-d"])
diff --git a/bin/node/cli/tests/common.rs b/bin/node/cli/tests/common.rs
index 50776202d79eb..54b9c749bf1de 100644
--- a/bin/node/cli/tests/common.rs
+++ b/bin/node/cli/tests/common.rs
@@ -54,10 +54,10 @@ pub fn wait_for(child: &mut Child, secs: usize) -> Option<ExitStatus> {
 }
 
 /// Run the node for a while (30 seconds)
-pub fn run_dev_node_for_a_while(base_path: &Path) {
+pub fn run_node_for_a_while(base_path: &Path, args: &[&str]) {
 	let mut cmd = Command::new(cargo_bin("substrate"));
 
-	let mut cmd = cmd.args(&["--dev"]).arg("-d").arg(base_path).spawn().unwrap();
+	let mut cmd = cmd.args(args).arg("-d").arg(base_path).spawn().unwrap();
 
 	// Let it produce some blocks.
 	thread::sleep(Duration::from_secs(30));
@@ -67,3 +67,14 @@ pub fn run_dev_node_for_a_while(base_path: &Path) {
 	kill(Pid::from_raw(cmd.id().try_into().unwrap()), SIGINT).unwrap();
 	assert!(wait_for(&mut cmd, 40).map(|x| x.success()).unwrap_or_default());
 }
+
+/// Run the node, asserting that it fails with an error
+pub fn run_node_assert_fail(base_path: &Path, args: &[&str]) {
+	let mut cmd = Command::new(cargo_bin("substrate"));
+
+	let mut cmd = cmd.args(args).arg("-d").arg(base_path).spawn().unwrap();
+
+	// Give the node some time; it is expected to exit with an error on its own.
+	thread::sleep(Duration::from_secs(10));
+	assert!(cmd.try_wait().unwrap().is_some(), "the process should not be running anymore");
+}
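With the `--dev` flag no longer baked into the helper, every test spells out the flags it needs. A rough usage sketch, assuming a hypothetical test file placed next to the existing ones in `bin/node/cli/tests` (the flag combinations are illustrative only):

    // Hypothetical example, not part of the patch: exercise the generalized helpers.
    use tempfile::tempdir;

    pub mod common;

    #[test]
    #[cfg(unix)]
    fn helper_usage_sketch() {
        let base_path = tempdir().expect("could not create a temp dir");

        // Equivalent of the old `run_dev_node_for_a_while`.
        common::run_node_for_a_while(base_path.path(), &["--dev"]);

        // Extra flags are threaded through unchanged, e.g. for a light client run.
        common::run_node_for_a_while(base_path.path(), &["--dev", "--light", "--no-prometheus"]);
    }

`run_node_assert_fail` takes the same arguments but expects the spawned process to exit on its own within roughly ten seconds; the migration test below uses it for the role-mismatch case.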
diff --git a/bin/node/cli/tests/database_role_subdir_migration.rs b/bin/node/cli/tests/database_role_subdir_migration.rs
new file mode 100644
index 0000000000000..516908111ae72
--- /dev/null
+++ b/bin/node/cli/tests/database_role_subdir_migration.rs
@@ -0,0 +1,115 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+use sc_client_db::{
+	light::LightStorage, DatabaseSettings, DatabaseSource, KeepBlocks, PruningMode,
+	TransactionStorageMode,
+};
+use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper};
+use tempfile::tempdir;
+
+pub mod common;
+
+#[test]
+#[cfg(unix)]
+fn database_role_subdir_migration() {
+	type Block = RawBlock<ExtrinsicWrapper<u64>>;
+
+	let base_path = tempdir().expect("could not create a temp dir");
+	let path = base_path.path().join("chains/dev/db");
+	// create a dummy database directory in the old, role-less layout
+	{
+		let _old_db = LightStorage::<Block>::new(DatabaseSettings {
+			state_cache_size: 0,
+			state_cache_child_ratio: None,
+			state_pruning: PruningMode::ArchiveAll,
+			source: DatabaseSource::RocksDb { path: path.to_path_buf(), cache_size: 128 },
+			keep_blocks: KeepBlocks::All,
+			transaction_storage: TransactionStorageMode::BlockBody,
+		})
+		.unwrap();
+	}
+
+	assert!(path.join("db_version").exists());
+	assert!(!path.join("light").exists());
+
+	// start a light client
+	common::run_node_for_a_while(
+		base_path.path(),
+		&[
+			"--dev",
+			"--light",
+			"--port",
+			"30335",
+			"--rpc-port",
+			"44444",
+			"--ws-port",
+			"44445",
+			"--no-prometheus",
+		],
+	);
+
+	// check that the database directory has been migrated
+	assert!(!path.join("db_version").exists());
+	assert!(path.join("light/db_version").exists());
+}
+
+#[test]
+#[cfg(unix)]
+fn database_role_subdir_migration_fail_on_different_role() {
+	type Block = RawBlock<ExtrinsicWrapper<u64>>;
+
+	let base_path = tempdir().expect("could not create a temp dir");
+	let path = base_path.path().join("chains/dev/db");
+
+	// create a database with the old layout
+	{
+		let _old_db = LightStorage::<Block>::new(DatabaseSettings {
+			state_cache_size: 0,
+			state_cache_child_ratio: None,
+			state_pruning: PruningMode::ArchiveAll,
+			source: DatabaseSource::RocksDb { path: path.to_path_buf(), cache_size: 128 },
+			keep_blocks: KeepBlocks::All,
+			transaction_storage: TransactionStorageMode::BlockBody,
+		})
+		.unwrap();
+	}
+
+	assert!(path.join("db_version").exists());
+	assert!(!path.join("light/db_version").exists());
+
+	// start a client with a different role (full); it should fail and must not change any files on disk
+	common::run_node_assert_fail(
+		base_path.path(),
+		&[
+			"--dev",
+			"--port",
+			"30334",
+			"--rpc-port",
+			"44446",
+			"--ws-port",
+			"44447",
+			"--no-prometheus",
+		],
+	);
+
+	// check that the files are unchanged
+	assert!(path.join("db_version").exists());
+	assert!(!path.join("light/db_version").exists());
+	assert!(!path.join("full/db_version").exists());
+}
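For orientation, the paths these two tests poke at all live under the node's base path. A small sketch of the layout change, using a hypothetical helper (`expected_db_dir` is not part of the codebase) and assuming the default `<base>/chains/<chain-id>/db` layout shown above:

    use std::path::{Path, PathBuf};

    /// Hypothetical helper: where a node of the given role keeps its RocksDB data
    /// after this change. Before the change everything lived directly in `.../db`.
    fn expected_db_dir(base_path: &Path, chain_id: &str, light: bool) -> PathBuf {
        let role_dir = if light { "light" } else { "full" };
        base_path.join("chains").join(chain_id).join("db").join(role_dir)
    }

    // expected_db_dir(base, "dev", true) ends in "chains/dev/db/light", which is
    // exactly where the first test expects `db_version` to appear after migration.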
diff --git a/bin/node/cli/tests/export_import_flow.rs b/bin/node/cli/tests/export_import_flow.rs
index 7bf64900b752a..7cbaa152699b4 100644
--- a/bin/node/cli/tests/export_import_flow.rs
+++ b/bin/node/cli/tests/export_import_flow.rs
@@ -188,7 +188,7 @@ fn export_import_revert() {
 	let exported_blocks_file = base_path.path().join("exported_blocks");
 	let db_path = base_path.path().join("db");
 
-	common::run_dev_node_for_a_while(base_path.path());
+	common::run_node_for_a_while(base_path.path(), &["--dev"]);
 
 	let mut executor = ExportImportRevertExecutor::new(&base_path, &exported_blocks_file, &db_path);
diff --git a/bin/node/cli/tests/inspect_works.rs b/bin/node/cli/tests/inspect_works.rs
index 67dbc97056cf6..2a89801547a4b 100644
--- a/bin/node/cli/tests/inspect_works.rs
+++ b/bin/node/cli/tests/inspect_works.rs
@@ -28,7 +28,7 @@ pub mod common;
 fn inspect_works() {
 	let base_path = tempdir().expect("could not create a temp dir");
 
-	common::run_dev_node_for_a_while(base_path.path());
+	common::run_node_for_a_while(base_path.path(), &["--dev"]);
 
 	let status = Command::new(cargo_bin("substrate"))
 		.args(&["inspect", "--dev", "--pruning", "archive", "-d"])
diff --git a/bin/node/cli/tests/purge_chain_works.rs b/bin/node/cli/tests/purge_chain_works.rs
index 4c0727d26cb17..0f16a51e5d0a4 100644
--- a/bin/node/cli/tests/purge_chain_works.rs
+++ b/bin/node/cli/tests/purge_chain_works.rs
@@ -27,7 +27,7 @@ pub mod common;
 fn purge_chain_works() {
 	let base_path = tempdir().expect("could not create a temp dir");
 
-	common::run_dev_node_for_a_while(base_path.path());
+	common::run_node_for_a_while(base_path.path(), &["--dev"]);
 
 	let status = Command::new(cargo_bin("substrate"))
 		.args(&["purge-chain", "--dev", "-d"])
@@ -39,5 +39,5 @@ fn purge_chain_works() {
 
 	// Make sure that the `dev` chain folder exists, but the `db` is deleted.
 	assert!(base_path.path().join("chains/dev/").exists());
-	assert!(!base_path.path().join("chains/dev/db").exists());
+	assert!(!base_path.path().join("chains/dev/db/full").exists());
 }
diff --git a/client/cli/src/config.rs b/client/cli/src/config.rs
index bfc7c6eb7bacc..36f267e4a3008 100644
--- a/client/cli/src/config.rs
+++ b/client/cli/src/config.rs
@@ -219,9 +219,14 @@ pub trait CliConfiguration<DCV: DefaultConfigurationValues = ()>: Sized {
 		base_path: &PathBuf,
 		cache_size: usize,
 		database: Database,
+		role: &Role,
 	) -> Result<DatabaseSource> {
-		let rocksdb_path = base_path.join("db");
-		let paritydb_path = base_path.join("paritydb");
+		let role_dir = match role {
+			Role::Light => "light",
+			Role::Full | Role::Authority => "full",
+		};
+		let rocksdb_path = base_path.join("db").join(role_dir);
+		let paritydb_path = base_path.join("paritydb").join(role_dir);
 		Ok(match database {
 			Database::RocksDb => DatabaseSource::RocksDb { path: rocksdb_path, cache_size },
 			Database::ParityDb => DatabaseSource::ParityDb { path: rocksdb_path },
@@ -499,7 +504,7 @@
 			)?,
 			keystore_remote,
 			keystore,
-			database: self.database_config(&config_dir, database_cache_size, database)?,
+			database: self.database_config(&config_dir, database_cache_size, database, &role)?,
 			state_cache_size: self.state_cache_size()?,
 			state_cache_child_ratio: self.state_cache_child_ratio()?,
 			state_pruning: self.state_pruning(unsafe_pruning, &role)?,
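The role-to-directory mapping added to `database_config` is also why `purge_chain_works` now checks `chains/dev/db/full`. A condensed sketch of the resulting layout for both backends, with `Role` as a simplified stand-in for the node role type used by the CLI:

    use std::path::{Path, PathBuf};

    // Simplified stand-in for the real role enum.
    enum Role { Full, Authority, Light }

    // Mirrors the mapping in `database_config`: the RocksDB and ParityDB locations
    // both gain a role-specific subdirectory.
    fn db_paths(base_path: &Path, role: &Role) -> (PathBuf, PathBuf) {
        let role_dir = match role {
            Role::Light => "light",
            Role::Full | Role::Authority => "full",
        };
        (base_path.join("db").join(role_dir), base_path.join("paritydb").join(role_dir))
    }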
diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs
index c7d6029c5356d..66adb64c0109e 100644
--- a/client/db/src/lib.rs
+++ b/client/db/src/lib.rs
@@ -355,7 +355,7 @@ pub enum DatabaseSource {
 }
 
 impl DatabaseSource {
-	/// Return dabase path for databases that are on the disk.
+	/// Return the path for databases that are stored on disk.
 	pub fn path(&self) -> Option<&Path> {
 		match self {
 			// as per https://github.com/paritytech/substrate/pull/9500#discussion_r684312550
@@ -367,6 +367,22 @@ impl DatabaseSource {
 			DatabaseSource::Custom(..) => None,
 		}
 	}
+
+	/// Set the path for databases that are stored on disk.
+	pub fn set_path(&mut self, p: &Path) -> bool {
+		match self {
+			DatabaseSource::Auto { ref mut paritydb_path, .. } => {
+				*paritydb_path = p.into();
+				true
+			},
+			DatabaseSource::RocksDb { ref mut path, .. } |
+			DatabaseSource::ParityDb { ref mut path } => {
+				*path = p.into();
+				true
+			},
+			DatabaseSource::Custom(..) => false,
+		}
+	}
 }
 
 impl std::fmt::Display for DatabaseSource {
diff --git a/client/db/src/upgrade.rs b/client/db/src/upgrade.rs
index 5c9c2ccdc51d9..0f3578ad99a37 100644
--- a/client/db/src/upgrade.rs
+++ b/client/db/src/upgrade.rs
@@ -186,7 +186,7 @@ mod tests {
 		}
 	}
 
-	fn open_database(db_path: &Path) -> sp_blockchain::Result<()> {
+	fn open_database(db_path: &Path, db_type: DatabaseType) -> sp_blockchain::Result<()> {
 		crate::utils::open_database::<Block>(
 			&DatabaseSettings {
 				state_cache_size: 0,
@@ -196,7 +196,7 @@ mod tests {
 				keep_blocks: KeepBlocks::All,
 				transaction_storage: TransactionStorageMode::BlockBody,
 			},
-			DatabaseType::Full,
+			db_type,
 		)
 		.map(|_| ())
 	}
@@ -205,25 +205,28 @@
 	fn downgrade_never_happens() {
 		let db_dir = tempfile::TempDir::new().unwrap();
 		create_db(db_dir.path(), Some(CURRENT_VERSION + 1));
-		assert!(open_database(db_dir.path()).is_err());
+		assert!(open_database(db_dir.path(), DatabaseType::Full).is_err());
 	}
 
 	#[test]
 	fn open_empty_database_works() {
+		let db_type = DatabaseType::Full;
 		let db_dir = tempfile::TempDir::new().unwrap();
-		open_database(db_dir.path()).unwrap();
-		open_database(db_dir.path()).unwrap();
-		assert_eq!(current_version(db_dir.path()).unwrap(), CURRENT_VERSION);
+		let db_dir = db_dir.path().join(db_type.as_str());
+		open_database(&db_dir, db_type).unwrap();
+		open_database(&db_dir, db_type).unwrap();
+		assert_eq!(current_version(&db_dir).unwrap(), CURRENT_VERSION);
 	}
 
 	#[test]
 	fn upgrade_to_3_works() {
+		let db_type = DatabaseType::Full;
 		for version_from_file in &[None, Some(1), Some(2)] {
 			let db_dir = tempfile::TempDir::new().unwrap();
-			let db_path = db_dir.path();
-			create_db(db_path, *version_from_file);
-			open_database(db_path).unwrap();
-			assert_eq!(current_version(db_path).unwrap(), CURRENT_VERSION);
+			let db_path = db_dir.path().join(db_type.as_str());
+			create_db(&db_path, *version_from_file);
+			open_database(&db_path, db_type).unwrap();
+			assert_eq!(current_version(&db_path).unwrap(), CURRENT_VERSION);
 		}
 	}
 }
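The new `DatabaseSource::set_path` is what both the migration code and its tests use to repoint a source at another directory. A minimal sketch, assuming `sc-client-db` is available as a dev-dependency (as added to `bin/node/cli/Cargo.toml` above):

    use sc_client_db::DatabaseSource;
    use std::path::Path;

    fn repoint_example() {
        // Repoint an on-disk source at a role subdirectory without touching its other fields.
        let mut source =
            DatabaseSource::RocksDb { path: "chains/dev/db".into(), cache_size: 128 };
        assert!(source.set_path(Path::new("chains/dev/db/full")));
        assert_eq!(source.path(), Some(Path::new("chains/dev/db/full")));
        // A custom database has no on-disk path, so `set_path` reports `false` for it.
    }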
diff --git a/client/db/src/utils.rs b/client/db/src/utils.rs
index 604a0e1328767..ea22c774f463e 100644
--- a/client/db/src/utils.rs
+++ b/client/db/src/utils.rs
@@ -19,9 +19,9 @@
 //! Db-based backend utility structures and functions, used by both
 //! full and light storages.
 
-use std::{convert::TryInto, fmt, io, path::Path, sync::Arc};
+use std::{convert::TryInto, fmt, fs, io, path::Path, sync::Arc};
 
-use log::debug;
+use log::{debug, info};
 
 use crate::{Database, DatabaseSettings, DatabaseSource, DbHash};
 use codec::Decode;
@@ -213,7 +213,21 @@ pub fn open_database<Block: BlockT>(
 	config: &DatabaseSettings,
 	db_type: DatabaseType,
 ) -> sp_blockchain::Result<Arc<dyn Database<DbHash>>> {
-	let db: Arc<dyn Database<DbHash>> = match &config.source {
+	// Maybe migrate (move) the database to a type-specific subdirectory so that light
+	// and full databases can coexist.
+	// NOTE: This function can be removed in a few releases
+	maybe_migrate_to_type_subdir::<Block>(&config.source, db_type).map_err(|e| {
+		sp_blockchain::Error::Backend(format!("Error in migration to role subdirectory: {}", e))
+	})?;
+
+	open_database_at::<Block>(&config.source, db_type)
+}
+
+fn open_database_at<Block: BlockT>(
+	source: &DatabaseSource,
+	db_type: DatabaseType,
+) -> sp_blockchain::Result<Arc<dyn Database<DbHash>>> {
+	let db: Arc<dyn Database<DbHash>> = match &source {
 		DatabaseSource::ParityDb { path } => open_parity_db::<Block>(&path, db_type, true)?,
 		DatabaseSource::RocksDb { path, cache_size } =>
 			open_kvdb_rocksdb::<Block>(&path, db_type, true, *cache_size)?,
@@ -394,6 +408,46 @@ pub fn check_database_type(
 	Ok(())
 }
 
+fn maybe_migrate_to_type_subdir<Block: BlockT>(
+	source: &DatabaseSource,
+	db_type: DatabaseType,
+) -> io::Result<()> {
+	if let Some(p) = source.path() {
+		let mut basedir = p.to_path_buf();
+		basedir.pop();
+
+		// Do we have to migrate to a database-type-based subdirectory layout:
+		// See if there's a file identifying a rocksdb or paritydb folder in the parent dir and
+		// the target path ends in a role-specific directory
+		if (basedir.join("db_version").exists() || basedir.join("metadata").exists()) &&
+			(p.ends_with(DatabaseType::Full.as_str()) ||
+				p.ends_with(DatabaseType::Light.as_str()))
+		{
+			// Try to open the database to check if the current `DatabaseType` matches the type of
+			// database stored in the target directory and close the database on success.
+			let mut old_source = source.clone();
+			old_source.set_path(&basedir);
+			open_database_at::<Block>(&old_source, db_type)
+				.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
+
+			info!(
+				"Migrating database to a database-type-based subdirectory: '{:?}' -> '{:?}'",
+				basedir,
+				basedir.join(db_type.as_str())
+			);
+			let mut tmp_dir = basedir.clone();
+			tmp_dir.pop();
+			tmp_dir.push("tmp");
+
+			fs::rename(&basedir, &tmp_dir)?;
+			fs::create_dir_all(&p)?;
+			fs::rename(tmp_dir, &p)?;
+		}
+	}
+
+	Ok(())
+}
+
 /// Read database column entry for the given block.
 pub fn read_db<Block>(
 	db: &dyn Database<DbHash>,
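The migration itself boils down to a three-step directory shuffle. A minimal sketch using only the standard library (`move_into_subdir` and its arguments are illustrative names; like the patch, it relies on `rename` being able to replace an empty target directory, as on Unix):

    use std::{fs, io, path::Path};

    fn move_into_subdir(db_dir: &Path, role_subdir: &str) -> io::Result<()> {
        let target = db_dir.join(role_subdir); // e.g. chains/dev/db/full
        let tmp = db_dir.with_file_name("tmp"); // sibling of the old db dir
        fs::rename(db_dir, &tmp)?; // move the old database aside
        fs::create_dir_all(&target)?; // recreate db/ plus the empty role subdirectory
        fs::rename(&tmp, &target)?; // move the old contents into the role subdirectory
        Ok(())
    }

Renaming instead of copying keeps the migration cheap; it works because the target subdirectory lives on the same filesystem as the data being moved.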
@@ -570,8 +624,95 @@ mod tests {
 	use codec::Input;
 	use sc_state_db::PruningMode;
 	use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper};
+	use std::path::PathBuf;
 
 	type Block = RawBlock<ExtrinsicWrapper<u64>>;
 
+	#[cfg(any(feature = "with-kvdb-rocksdb", test))]
+	#[test]
+	fn database_type_subdir_migration() {
+		type Block = RawBlock<ExtrinsicWrapper<u64>>;
+
+		fn check_dir_for_db_type(
+			db_type: DatabaseType,
+			mut source: DatabaseSource,
+			db_check_file: &str,
+		) {
+			let base_path = tempfile::TempDir::new().unwrap();
+			let old_db_path = base_path.path().join("chains/dev/db");
+
+			source.set_path(&old_db_path);
+			let settings = db_settings(source.clone());
+
+			{
+				let db_res = open_database::<Block>(&settings, db_type);
+				assert!(db_res.is_ok(), "New database should be created.");
+				assert!(old_db_path.join(db_check_file).exists());
+				assert!(!old_db_path.join(db_type.as_str()).join("db_version").exists());
+			}
+
+			source.set_path(&old_db_path.join(db_type.as_str()));
+			let settings = db_settings(source);
+			let db_res = open_database::<Block>(&settings, db_type);
+			assert!(db_res.is_ok(), "Reopening the db with the same role should work");
+			// check that the database directory has been migrated
+			assert!(!old_db_path.join(db_check_file).exists());
+			assert!(old_db_path.join(db_type.as_str()).join(db_check_file).exists());
+		}
+
+		check_dir_for_db_type(
+			DatabaseType::Light,
+			DatabaseSource::RocksDb { path: PathBuf::new(), cache_size: 128 },
+			"db_version",
+		);
+		check_dir_for_db_type(
+			DatabaseType::Full,
+			DatabaseSource::RocksDb { path: PathBuf::new(), cache_size: 128 },
+			"db_version",
+		);
+
+		#[cfg(feature = "with-parity-db")]
+		check_dir_for_db_type(
+			DatabaseType::Light,
+			DatabaseSource::ParityDb { path: PathBuf::new() },
+			"metadata",
+		);
+		#[cfg(feature = "with-parity-db")]
+		check_dir_for_db_type(
+			DatabaseType::Full,
+			DatabaseSource::ParityDb { path: PathBuf::new() },
+			"metadata",
+		);
+
+		// check failure on reopening with wrong role
+		{
+			let base_path = tempfile::TempDir::new().unwrap();
+			let old_db_path = base_path.path().join("chains/dev/db");
+
+			let source = DatabaseSource::RocksDb { path: old_db_path.clone(), cache_size: 128 };
+			let settings = db_settings(source);
+			{
+				let db_res = open_database::<Block>(&settings, DatabaseType::Full);
+				assert!(db_res.is_ok(), "New database should be created.");
+
+				// the path does not end in a role subdirectory, so no migration has happened
+				assert!(old_db_path.join("db_version").exists());
+				assert!(!old_db_path.join("light/db_version").exists());
+				assert!(!old_db_path.join("full/db_version").exists());
+			}
+			let source = DatabaseSource::RocksDb {
+				path: old_db_path.join(DatabaseType::Light.as_str()),
+				cache_size: 128,
+			};
+			let settings = db_settings(source);
+			let db_res = open_database::<Block>(&settings, DatabaseType::Light);
+			assert!(db_res.is_err(), "Opening a full database in light role should fail");
+			// assert nothing was changed
+			assert!(old_db_path.join("db_version").exists());
+			assert!(!old_db_path.join("light/db_version").exists());
+			assert!(!old_db_path.join("full/db_version").exists());
+		}
+	}
+
 	#[test]
 	fn number_index_key_doesnt_panic() {
 		let id = BlockId::<Block>::Number(72340207214430721);