
Commit

Merge pull request #1 from haizhi-tech/v0.21.0-haizhi-event-listener-snapshot-fix

fix column family export bug
liuyihua-cn authored Jun 14, 2024
2 parents 8478bea + a474ce1 commit 646f747
Showing 29 changed files with 115 additions and 83 deletions.
2 changes: 1 addition & 1 deletion .gitmodules
@@ -4,7 +4,7 @@
[submodule "librocksdb-sys/rocksdb"]
path = librocksdb-sys/rocksdb
url = https://github.com/haizhi-tech/rocksdb.git
branch = v7.9.2_statistics_cf
branch = v8.1.1-hz
[submodule "librocksdb-sys/lz4"]
path = librocksdb-sys/lz4
url = https://github.com/lz4/lz4.git
5 changes: 4 additions & 1 deletion Cargo.toml
@@ -45,4 +45,7 @@ trybuild = "1.0"
tempfile = "3.1"
pretty_assertions = "1.0"
bincode = "1.3"
serde = { version = "1", features = [ "derive" ] }
serde = { version = "1", features = [ "derive" ] }

[lib]
doctest = false
2 changes: 1 addition & 1 deletion librocksdb-sys/rocksdb
Submodule rocksdb updated 2 files
+66 −48 db/c.cc
+23 −20 include/rocksdb/c.h
11 changes: 11 additions & 0 deletions src/checkpoint.rs
@@ -78,6 +78,12 @@ pub struct RocksdbLevelMetaData {
file_creation_time: u64,
file_checksum: String,
file_checksum_func_name: String,
#[serde(default)]
epoch_number: u64,
#[serde(default)]
hex_smallest: String,
#[serde(default)]
hex_largest: String,
}

impl ExportImportFilesMetaData {
@@ -123,6 +129,8 @@ impl ExportImportFilesMetaData {
let hex_largestkey = CString::new(file.hex_largestkey).unwrap();
let file_checksum = CString::new(file.file_checksum).unwrap();
let file_checksum_func_name = CString::new(file.file_checksum_func_name).unwrap();
let hex_smallest = CString::new(file.hex_smallest).unwrap();
let hex_largest = CString::new(file.hex_largest).unwrap();
files.push(ffi_try!(ffi::rocksdb_new_live_file_metadata(
column_family_name.as_ptr(),
file.level,
@@ -147,6 +155,9 @@
file.file_creation_time,
file_checksum.as_ptr(),
file_checksum_func_name.as_ptr(),
file.epoch_number,
hex_smallest.as_ptr(),
hex_largest.as_ptr(),
)));
}

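The new `epoch_number`, `hex_smallest`, and `hex_largest` fields are marked `#[serde(default)]`, so metadata serialized before this change still deserializes, and the values are forwarded to `rocksdb_new_live_file_metadata` when the FFI metadata is rebuilt. Below is a minimal sketch of the export → save → load → import round trip these changes serve, using only names that appear in this diff (`export_column_family`, `save`, `load`, `create_cf_with_import`); the `/tmp` paths are illustrative and the exact `load(metadata_path, origin_db_path)` signature is inferred from the fork's test further down this page, not from documented API.

```rust
use std::path::Path;

use haizhi_rocksdb as rocksdb;
use rocksdb::{
    checkpoint::{Checkpoint, ExportImportFilesMetaData},
    Options, DB,
};

fn export_and_reimport_cf1() {
    // Start from a clean slate, as the test does.
    for p in ["/tmp/db1", "/tmp/db1_backup", "/tmp/db1_recover"] {
        let _ = std::fs::remove_dir_all(p);
    }

    let mut opts = Options::default();
    opts.create_if_missing(true);

    // Build a small origin DB with one extra column family.
    let mut origin_db = DB::open(&opts, "/tmp/db1").unwrap();
    origin_db.create_cf("cf1", &opts).unwrap();
    let cf1 = origin_db.cf_handle("cf1").unwrap();
    origin_db.put_cf(&cf1, b"1", b"1").unwrap();

    // Export the column family's files through a checkpoint.
    let checkpoint = Checkpoint::new(&origin_db).unwrap();
    let export_path = Path::new("/tmp/db1_backup");
    let metadata = checkpoint.export_column_family(cf1, &export_path).unwrap();

    // Persist the metadata (the test writes a .json file); files saved before
    // this commit still load because the new fields fall back to serde defaults.
    let metadata_path = Path::new("/tmp/db1_metadata.json");
    metadata.save(metadata_path).unwrap();
    let recovered =
        ExportImportFilesMetaData::load(metadata_path, "/tmp/db1".to_string()).unwrap();

    // Rebuild the column family in another DB from the exported files.
    let mut recover_db = DB::open(&opts, "/tmp/db1_recover").unwrap();
    recover_db
        .create_cf_with_import("cf1", &opts, &recovered)
        .unwrap();
    assert!(recover_db.cf_handle("cf1").is_some());
}
```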
1 change: 1 addition & 0 deletions tests/fail/checkpoint_outlive_db.rs
@@ -1,3 +1,4 @@
use haizhi_rocksdb as rocksdb;
use rocksdb::{DB, checkpoint::Checkpoint};

fn main() {
11 changes: 6 additions & 5 deletions tests/fail/checkpoint_outlive_db.stderr
@@ -1,10 +1,11 @@
error[E0597]: `db` does not live long enough
--> tests/fail/checkpoint_outlive_db.rs:6:25
--> tests/fail/checkpoint_outlive_db.rs:7:25
|
4 | let _checkpoint = {
5 | let _checkpoint = {
| ----------- borrow later stored here
5 | let db = DB::open_default("foo").unwrap();
6 | Checkpoint::new(&db)
6 | let db = DB::open_default("foo").unwrap();
| -- binding `db` declared here
7 | Checkpoint::new(&db)
| ^^^ borrowed value does not live long enough
7 | };
8 | };
| - `db` dropped here while still borrowed
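For context, the compile-fail fixture this `.stderr` belongs to now reads roughly as follows (reconstructed from the diff above and the compiler output); every reported line number shifts down by one because of the added `use haizhi_rocksdb as rocksdb;` import. The snippet is intentionally rejected by the compiler — that rejection is exactly what the trybuild test asserts.

```rust
use haizhi_rocksdb as rocksdb;
use rocksdb::{DB, checkpoint::Checkpoint};

fn main() {
    let _checkpoint = {
        let db = DB::open_default("foo").unwrap();
        Checkpoint::new(&db) // E0597: `db` does not live long enough
    };                       // `db` dropped here while still borrowed
}
```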
1 change: 1 addition & 0 deletions tests/fail/iterator_outlive_db.rs
@@ -1,3 +1,4 @@
use haizhi_rocksdb as rocksdb;
use rocksdb::{IteratorMode, DB};

fn main() {
13 changes: 7 additions & 6 deletions tests/fail/iterator_outlive_db.stderr
@@ -1,10 +1,11 @@
error[E0597]: `db` does not live long enough
--> tests/fail/iterator_outlive_db.rs:6:9
--> tests/fail/iterator_outlive_db.rs:7:9
|
4 | let _iter = {
5 | let _iter = {
| ----- borrow later stored here
5 | let db = DB::open_default("foo").unwrap();
6 | db.iterator(IteratorMode::Start)
| ^^ borrowed value does not live long enough
7 | };
6 | let db = DB::open_default("foo").unwrap();
| -- binding `db` declared here
7 | db.iterator(IteratorMode::Start)
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ borrowed value does not live long enough
8 | };
| - `db` dropped here while still borrowed
1 change: 1 addition & 0 deletions tests/fail/open_with_multiple_refs_as_single_threaded.rs
@@ -1,3 +1,4 @@
use haizhi_rocksdb as rocksdb;
use rocksdb::{SingleThreaded, DBWithThreadMode, Options};

fn main() {
30 changes: 17 additions & 13 deletions tests/fail/open_with_multiple_refs_as_single_threaded.stderr
@@ -1,17 +1,21 @@
error[E0596]: cannot borrow `*db_ref1` as mutable, as it is behind a `&` reference
--> tests/fail/open_with_multiple_refs_as_single_threaded.rs:8:5
--> tests/fail/open_with_multiple_refs_as_single_threaded.rs:9:5
|
9 | db_ref1.create_cf("cf1", &opts).unwrap();
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `db_ref1` is a `&` reference, so the data it refers to cannot be borrowed as mutable
|
help: consider changing this to be a mutable reference
|
5 | let db_ref1 = &db;
| --- help: consider changing this to be a mutable reference: `&mut db`
...
8 | db_ref1.create_cf("cf1", &opts).unwrap();
| ^^^^^^^ `db_ref1` is a `&` reference, so the data it refers to cannot be borrowed as mutable
6 | let db_ref1 = &mut db;
| ~~~~~~~

error[E0596]: cannot borrow `*db_ref2` as mutable, as it is behind a `&` reference
--> tests/fail/open_with_multiple_refs_as_single_threaded.rs:9:5
|
6 | let db_ref2 = &db;
| --- help: consider changing this to be a mutable reference: `&mut db`
...
9 | db_ref2.create_cf("cf2", &opts).unwrap();
| ^^^^^^^ `db_ref2` is a `&` reference, so the data it refers to cannot be borrowed as mutable
--> tests/fail/open_with_multiple_refs_as_single_threaded.rs:10:5
|
10 | db_ref2.create_cf("cf2", &opts).unwrap();
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `db_ref2` is a `&` reference, so the data it refers to cannot be borrowed as mutable
|
help: consider changing this to be a mutable reference
|
7 | let db_ref2 = &mut db;
| ~~~~~~~
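The same one-line shift explains the new 9/10 positions here. A reconstruction of the fixture from the compiler output is shown below; only the borrow pattern and the `create_cf` calls appear verbatim in the error, so the `open_default` and `Options::default()` setup lines are assumptions about the fixture's exact shape.

```rust
use haizhi_rocksdb as rocksdb;
use rocksdb::{SingleThreaded, DBWithThreadMode, Options};

fn main() {
    let db = DBWithThreadMode::<SingleThreaded>::open_default("foo").unwrap();
    let db_ref1 = &db;
    let db_ref2 = &db;
    let opts = Options::default();
    // E0596: in SingleThreaded mode create_cf takes `&mut self`,
    // so it cannot be called through shared `&` references.
    db_ref1.create_cf("cf1", &opts).unwrap();
    db_ref2.create_cf("cf2", &opts).unwrap();
}
```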
1 change: 1 addition & 0 deletions tests/fail/snapshot_outlive_db.rs
@@ -1,3 +1,4 @@
use haizhi_rocksdb as rocksdb;
use rocksdb::DB;

fn main() {
13 changes: 7 additions & 6 deletions tests/fail/snapshot_outlive_db.stderr
@@ -1,10 +1,11 @@
error[E0597]: `db` does not live long enough
--> tests/fail/snapshot_outlive_db.rs:6:9
--> tests/fail/snapshot_outlive_db.rs:7:9
|
4 | let _snapshot = {
5 | let _snapshot = {
| --------- borrow later stored here
5 | let db = DB::open_default("foo").unwrap();
6 | db.snapshot()
| ^^ borrowed value does not live long enough
7 | };
6 | let db = DB::open_default("foo").unwrap();
| -- binding `db` declared here
7 | db.snapshot()
| ^^^^^^^^^^^^^ borrowed value does not live long enough
8 | };
| - `db` dropped here while still borrowed
1 change: 1 addition & 0 deletions tests/fail/snapshot_outlive_transaction.rs
@@ -1,3 +1,4 @@
use haizhi_rocksdb as rocksdb;
use rocksdb::{TransactionDB, SingleThreaded};

fn main() {
11 changes: 6 additions & 5 deletions tests/fail/snapshot_outlive_transaction.stderr
@@ -1,10 +1,11 @@
error[E0597]: `txn` does not live long enough
--> tests/fail/snapshot_outlive_transaction.rs:7:9
--> tests/fail/snapshot_outlive_transaction.rs:8:9
|
5 | let _snapshot = {
6 | let _snapshot = {
| --------- borrow later stored here
6 | let txn = db.transaction();
7 | txn.snapshot()
7 | let txn = db.transaction();
| --- binding `txn` declared here
8 | txn.snapshot()
| ^^^^^^^^^^^^^^ borrowed value does not live long enough
8 | };
9 | };
| - `txn` dropped here while still borrowed
1 change: 1 addition & 0 deletions tests/fail/snapshot_outlive_transaction_db.rs
@@ -1,3 +1,4 @@
use haizhi_rocksdb as rocksdb;
use rocksdb::{TransactionDB, SingleThreaded};

fn main() {
11 changes: 6 additions & 5 deletions tests/fail/snapshot_outlive_transaction_db.stderr
@@ -1,10 +1,11 @@
error[E0597]: `db` does not live long enough
--> tests/fail/snapshot_outlive_transaction_db.rs:6:9
--> tests/fail/snapshot_outlive_transaction_db.rs:7:9
|
4 | let _snapshot = {
5 | let _snapshot = {
| --------- borrow later stored here
5 | let db = TransactionDB::<SingleThreaded>::open_default("foo").unwrap();
6 | db.snapshot()
6 | let db = TransactionDB::<SingleThreaded>::open_default("foo").unwrap();
| -- binding `db` declared here
7 | db.snapshot()
| ^^^^^^^^^^^^^ borrowed value does not live long enough
7 | };
8 | };
| - `db` dropped here while still borrowed
1 change: 1 addition & 0 deletions tests/fail/transaction_outlive_transaction_db.rs
@@ -1,3 +1,4 @@
use haizhi_rocksdb as rocksdb;
use rocksdb::{TransactionDB, SingleThreaded};

fn main() {
Expand Down
11 changes: 6 additions & 5 deletions tests/fail/transaction_outlive_transaction_db.stderr
Original file line number Diff line number Diff line change
@@ -1,10 +1,11 @@
error[E0597]: `db` does not live long enough
--> tests/fail/transaction_outlive_transaction_db.rs:6:9
--> tests/fail/transaction_outlive_transaction_db.rs:7:9
|
4 | let _txn = {
5 | let _txn = {
| ---- borrow later stored here
5 | let db = TransactionDB::<SingleThreaded>::open_default("foo").unwrap();
6 | db.transaction()
6 | let db = TransactionDB::<SingleThreaded>::open_default("foo").unwrap();
| -- binding `db` declared here
7 | db.transaction()
| ^^^^^^^^^^^^^^^^ borrowed value does not live long enough
7 | };
8 | };
| - `db` dropped here while still borrowed
3 changes: 2 additions & 1 deletion tests/test_approximate.rs
@@ -1,6 +1,7 @@
use haizhi_rocksdb as rocksdb;
use std::time::Duration;

use haizhi_rocksdb as rocksdb;

use rocksdb::Ranges;
use rocksdb::{ColumnFamilyDescriptor, Options, DB};
#[test]
4 changes: 3 additions & 1 deletion tests/test_backup.rs
@@ -16,7 +16,9 @@ mod util;

use pretty_assertions::assert_eq;

use haizhi_rocksdb::{
use haizhi_rocksdb as rocksdb;

use rocksdb::{
backup::{BackupEngine, BackupEngineOptions, RestoreOptions},
Env, DB,
};
3 changes: 2 additions & 1 deletion tests/test_checkpoint.rs
@@ -14,9 +14,10 @@

mod util;

use haizhi_rocksdb as rocksdb;
use pretty_assertions::assert_eq;

use haizhi_rocksdb as rocksdb;

use rocksdb::{
checkpoint::{Checkpoint, ExportImportFilesMetaData},
Options, DB,
42 changes: 18 additions & 24 deletions tests/test_column_family.rs
@@ -14,9 +14,10 @@

mod util;

use haizhi_rocksdb as rocksdb;
use pretty_assertions::assert_eq;

use haizhi_rocksdb as rocksdb;

use rocksdb::checkpoint::{Checkpoint, ExportImportFilesMetaData};
use rocksdb::{ColumnFamilyDescriptor, MergeOperands, Options, DB, DEFAULT_COLUMN_FAMILY_NAME};
use rocksdb::{TransactionDB, TransactionDBOptions};
@@ -503,10 +504,11 @@ fn test_no_leaked_column_family() {

#[test]
fn test_create_cf_with_import() {
const PATH_PREFIX: &str = "_rust_rocksdb_create_cf_with_import_";

const PATH_PREFIX: &str = "/tmp/_rust_rocksdb_create_cf_with_import_";
let _ = std::fs::remove_dir_all(PATH_PREFIX);
// Create DB with some data
let origin_db_path = DBPath::new(&format!("{}db1", PATH_PREFIX));
let origin_db_string_path = format!("{}/db1", PATH_PREFIX);
let origin_db_path = Path::new(&origin_db_string_path);

let mut opts = Options::default();
opts.create_if_missing(true);
@@ -524,22 +526,29 @@ fn test_create_cf_with_import() {
assert!(origin_db.put_cf(&cf1, b"1", b"1").is_ok());
let cf2 = origin_db.cf_handle("cf2").unwrap();
assert!(origin_db.put_cf(&cf2, b"2", b"2").is_ok());
// add some keys and call delete range
assert!(origin_db.put_cf(&cf1, b"a1", b"a1").is_ok());
assert!(origin_db.put_cf(&cf1, b"a2", b"a2").is_ok());
assert!(origin_db.delete_range_cf(&cf1, b"a0", b"a5").is_ok());

let checkpoint = Checkpoint::new(&origin_db);
assert!(checkpoint.is_ok());
let checkpoint = checkpoint.unwrap();

let export_path = DBPath::new(&format!("{}db1_backup", PATH_PREFIX));
let export_path = format!("{}/db1_backup", PATH_PREFIX);
let export_path = Path::new(&export_path);
let result = checkpoint.export_column_family(cf1, &export_path);
assert!(result.is_ok());
drop(checkpoint);

let origin_metadata = result.unwrap();
let metadata_path = Path::new("/tmp/db1_metadata.json");
origin_metadata.save(metadata_path).unwrap();
let recover_metadata = ExportImportFilesMetaData::load(metadata_path).unwrap();
let recover_metadata =
ExportImportFilesMetaData::load(metadata_path, origin_db_string_path.clone()).unwrap();
// new db from export path
let recover_db_path = DBPath::new(&format!("{}db1_recover", PATH_PREFIX));
let recover_db_path = format!("{}/db1_recover", PATH_PREFIX);
let recover_db_path = Path::new(&recover_db_path);
let mut recover_db = DB::open(&opts, &recover_db_path).unwrap();
assert!(recover_db.cf_handle("cf1").is_none());
assert!(recover_db.cf_handle("cf2").is_none());
@@ -553,23 +562,6 @@
);
assert!(recover_db.cf_handle("cf2").is_none());
assert!(recover_db.get_cf(&cf1, b"2").unwrap().is_none());
// then we will test origin db
assert!(origin_db.put_cf(&cf1, b"11", b"11").is_ok());
assert!(origin_db.drop_cf("cf1").is_ok());
// cf1 and its data are none
assert!(origin_db.cf_handle("cf1").is_none());
// import cf1
assert!(origin_db
.create_cf_with_import("cf1", &opts, &recover_metadata)
.is_ok());
let cf1 = origin_db.cf_handle("cf1").unwrap();
assert_eq!(
origin_db.get_cf(&cf1, vec![1]).unwrap().unwrap(),
b"illegal1"
);
assert_eq!(origin_db.get_cf(&cf1, b"1").unwrap().unwrap(), b"1");
assert!(origin_db.get_cf(&cf1, b"11").unwrap().is_none());
assert!(origin_db.put_cf(&cf1, b"11", b"11").is_ok());
// import cf3
assert!(origin_db
.create_cf_with_import("cf3", &opts, &recover_metadata)
Expand All @@ -581,4 +573,6 @@ fn test_create_cf_with_import() {
);
assert_eq!(origin_db.get_cf(&cf3, b"1").unwrap().unwrap(), b"1");
assert!(origin_db.get_cf(&cf3, b"11").unwrap().is_none());
assert!(origin_db.get_cf(&cf3, b"a1").unwrap().is_none());
let _ = std::fs::remove_dir_all(PATH_PREFIX);
}
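The reworked test also pins down the behaviour behind the commit message: keys covered by a range deletion before the export must stay deleted after `export_column_family` and `create_cf_with_import` (note the final `get_cf(&cf3, b"a1")` assertion). A minimal sketch of that write-then-range-delete step, assuming a DB handle and column family handle like the ones in the test:

```rust
use haizhi_rocksdb as rocksdb;
use rocksdb::{ColumnFamily, Error, DB};

// Range deletes cover [start, end), so both "a1" and "a2" fall inside ["a0", "a5").
// An exported/imported copy of this column family must carry the same tombstone.
fn write_then_range_delete(db: &DB, cf: &ColumnFamily) -> Result<(), Error> {
    db.put_cf(cf, b"a1", b"a1")?;
    db.put_cf(cf, b"a2", b"a2")?;
    db.delete_range_cf(cf, b"a0", b"a5")?;
    assert!(db.get_cf(cf, b"a1")?.is_none());
    Ok(())
}
```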
2 changes: 2 additions & 0 deletions tests/test_comparator.rs
@@ -1,3 +1,5 @@
use haizhi_rocksdb as rocksdb;

use rocksdb::{Options, DB};
use std::cmp::Ordering;
use std::iter::FromIterator;
2 changes: 1 addition & 1 deletion tests/test_compationfilter.rs
@@ -13,8 +13,8 @@
// limitations under the License.

mod util;

use haizhi_rocksdb as rocksdb;

use pretty_assertions::assert_eq;

use rocksdb::{CompactionDecision, Options, DB};
7 changes: 4 additions & 3 deletions tests/test_db.rs
@@ -14,10 +14,11 @@

mod util;

use haizhi_rocksdb as rocksdb;

use std::convert::TryInto;
use std::{mem, sync::Arc, thread, time::Duration};

use haizhi_rocksdb as rocksdb;
use pretty_assertions::assert_eq;

use rocksdb::{
@@ -851,7 +852,7 @@ fn get_with_cache_and_bulkload_test() {

{
// set block based table and cache
let cache = Cache::new_lru_cache(512 << 10);
let cache = Cache::new_lru_cache(512 << 10).unwrap();
assert_eq!(cache.get_usage(), 0);
let mut block_based_opts = BlockBasedOptions::default();
block_based_opts.set_block_cache(&cache);
@@ -986,7 +987,7 @@ fn get_with_cache_and_bulkload_and_blobs_test() {

{
// set block based table and cache
let cache = Cache::new_lru_cache(512 << 10);
let cache = Cache::new_lru_cache(512 << 10).unwrap();
assert_eq!(cache.get_usage(), 0);
let mut block_based_opts = BlockBasedOptions::default();
block_based_opts.set_block_cache(&cache);
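Both hunks in this file adapt to the fork's `Cache::new_lru_cache`, which evidently returns a `Result` here (hence the added `.unwrap()` calls). A minimal sketch of the cache wiring the tests exercise, mirroring the diff; the helper name is illustrative:

```rust
use haizhi_rocksdb as rocksdb;
use rocksdb::{BlockBasedOptions, Cache, Options};

fn options_with_block_cache() -> Options {
    // In this fork new_lru_cache returns a Result, so unwrap it as the tests do.
    let cache = Cache::new_lru_cache(512 << 10).unwrap();
    assert_eq!(cache.get_usage(), 0);

    let mut block_based_opts = BlockBasedOptions::default();
    block_based_opts.set_block_cache(&cache);

    let mut opts = Options::default();
    opts.set_block_based_table_factory(&block_based_opts);
    opts
}
```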