From 4fb026446575003d5f0cdf1414e20f4c99ba5369 Mon Sep 17 00:00:00 2001 From: debris Date: Tue, 30 Jul 2019 17:20:15 +0200 Subject: [PATCH 1/7] inject_batch && commit_batch are no longer a part of journaldb --- util/journaldb/src/archivedb.rs | 98 ++++++------- util/journaldb/src/earlymergedb.rs | 189 +++++++++++++------------- util/journaldb/src/lib.rs | 24 ++++ util/journaldb/src/overlayrecentdb.rs | 188 ++++++++++++------------- util/journaldb/src/refcounteddb.rs | 44 +++--- util/journaldb/src/traits.rs | 23 ---- 6 files changed, 283 insertions(+), 283 deletions(-) diff --git a/util/journaldb/src/archivedb.rs b/util/journaldb/src/archivedb.rs index ed357d80d6b..9d6d2453c45 100644 --- a/util/journaldb/src/archivedb.rs +++ b/util/journaldb/src/archivedb.rs @@ -208,7 +208,7 @@ mod tests { use keccak::keccak; use hash_db::{HashDB, EMPTY_PREFIX}; use super::*; - use {kvdb_memorydb, JournalDB}; + use {kvdb_memorydb, JournalDB, inject_batch, commit_batch}; #[test] fn insert_same_in_fork() { @@ -216,18 +216,18 @@ mod tests { let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None); let x = jdb.insert(EMPTY_PREFIX, b"X"); - jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); - jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); - jdb.commit_batch(3, &keccak(b"1002a"), Some((1, keccak(b"1")))).unwrap(); - jdb.commit_batch(4, &keccak(b"1003a"), Some((2, keccak(b"2")))).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1"), None).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2"), None).unwrap(); + commit_batch(&mut jdb, 3, &keccak(b"1002a"), Some((1, keccak(b"1")))).unwrap(); + commit_batch(&mut jdb, 4, &keccak(b"1003a"), Some((2, keccak(b"2")))).unwrap(); jdb.remove(&x, EMPTY_PREFIX); - jdb.commit_batch(3, &keccak(b"1002b"), Some((1, keccak(b"1")))).unwrap(); + commit_batch(&mut jdb, 3, &keccak(b"1002b"), Some((1, keccak(b"1")))).unwrap(); let x = jdb.insert(EMPTY_PREFIX, b"X"); - jdb.commit_batch(4, &keccak(b"1003b"), Some((2, keccak(b"2")))).unwrap(); + commit_batch(&mut jdb, 4, &keccak(b"1003b"), Some((2, keccak(b"2")))).unwrap(); - jdb.commit_batch(5, &keccak(b"1004a"), Some((3, keccak(b"1002a")))).unwrap(); - jdb.commit_batch(6, &keccak(b"1005a"), Some((4, keccak(b"1003a")))).unwrap(); + commit_batch(&mut jdb, 5, &keccak(b"1004a"), Some((3, keccak(b"1002a")))).unwrap(); + commit_batch(&mut jdb, 6, &keccak(b"1005a"), Some((4, keccak(b"1003a")))).unwrap(); assert!(jdb.contains(&x, EMPTY_PREFIX)); } @@ -237,16 +237,16 @@ mod tests { // history is 3 let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None); let h = jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.contains(&h, EMPTY_PREFIX)); jdb.remove(&h, EMPTY_PREFIX); - jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1"), None).unwrap(); assert!(jdb.contains(&h, EMPTY_PREFIX)); - jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2"), None).unwrap(); assert!(jdb.contains(&h, EMPTY_PREFIX)); - jdb.commit_batch(3, &keccak(b"3"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 3, &keccak(b"3"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.contains(&h, EMPTY_PREFIX)); - jdb.commit_batch(4, &keccak(b"4"), Some((1, keccak(b"1")))).unwrap(); + commit_batch(&mut jdb, 4, &keccak(b"4"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.contains(&h, EMPTY_PREFIX)); } @@ -255,13 +255,13 @@ mod tests { fn 
multiple_owed_removal_not_allowed() { let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None); let h = jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.contains(&h, EMPTY_PREFIX)); jdb.remove(&h, EMPTY_PREFIX); jdb.remove(&h, EMPTY_PREFIX); // commit_batch would call journal_under(), // and we don't allow multiple owned removals. - jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1"), None).unwrap(); } #[test] @@ -271,29 +271,29 @@ mod tests { let foo = jdb.insert(EMPTY_PREFIX, b"foo"); let bar = jdb.insert(EMPTY_PREFIX, b"bar"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(jdb.contains(&bar, EMPTY_PREFIX)); jdb.remove(&foo, EMPTY_PREFIX); jdb.remove(&bar, EMPTY_PREFIX); let baz = jdb.insert(EMPTY_PREFIX, b"baz"); - jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(jdb.contains(&bar, EMPTY_PREFIX)); assert!(jdb.contains(&baz, EMPTY_PREFIX)); let foo = jdb.insert(EMPTY_PREFIX, b"foo"); jdb.remove(&baz, EMPTY_PREFIX); - jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(jdb.contains(&baz, EMPTY_PREFIX)); jdb.remove(&foo, EMPTY_PREFIX); - jdb.commit_batch(3, &keccak(b"3"), Some((2, keccak(b"2")))).unwrap(); + commit_batch(&mut jdb, 3, &keccak(b"3"), Some((2, keccak(b"2")))).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); - jdb.commit_batch(4, &keccak(b"4"), Some((3, keccak(b"3")))).unwrap(); + commit_batch(&mut jdb, 4, &keccak(b"4"), Some((3, keccak(b"3")))).unwrap(); } #[test] @@ -303,22 +303,22 @@ mod tests { let foo = jdb.insert(EMPTY_PREFIX, b"foo"); let bar = jdb.insert(EMPTY_PREFIX, b"bar"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(jdb.contains(&bar, EMPTY_PREFIX)); jdb.remove(&foo, EMPTY_PREFIX); let baz = jdb.insert(EMPTY_PREFIX, b"baz"); - jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); jdb.remove(&bar, EMPTY_PREFIX); - jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(jdb.contains(&bar, EMPTY_PREFIX)); assert!(jdb.contains(&baz, EMPTY_PREFIX)); - jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); } @@ -328,16 +328,16 @@ mod tests { let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None); let foo = jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); jdb.remove(&foo, EMPTY_PREFIX); - jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); jdb.insert(EMPTY_PREFIX, 
b"foo"); assert!(jdb.contains(&foo, EMPTY_PREFIX)); - jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); - jdb.commit_batch(3, &keccak(b"2"), Some((0, keccak(b"2")))).unwrap(); + commit_batch(&mut jdb, 3, &keccak(b"2"), Some((0, keccak(b"2")))).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); } @@ -345,16 +345,16 @@ mod tests { fn fork_same_key() { // history is 1 let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); let foo = jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); - jdb.commit_batch(2, &keccak(b"2a"), Some((1, keccak(b"1a")))).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2a"), Some((1, keccak(b"1a")))).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); } @@ -368,21 +368,21 @@ mod tests { // history is 1 let foo = jdb.insert(EMPTY_PREFIX, b"foo"); jdb.emplace(bar.clone(), EMPTY_PREFIX, DBValue::from_slice(b"bar")); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); foo }; { let mut jdb = ArchiveDB::new(shared_db.clone(), None); jdb.remove(&foo, EMPTY_PREFIX); - jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); } { let mut jdb = ArchiveDB::new(shared_db, None); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(jdb.contains(&bar, EMPTY_PREFIX)); - jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); } } @@ -394,24 +394,24 @@ mod tests { let mut jdb = ArchiveDB::new(shared_db.clone(), None); // history is 1 let foo = jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); - jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); // foo is ancient history. 
jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); foo }; { let mut jdb = ArchiveDB::new(shared_db, None); jdb.remove(&foo, EMPTY_PREFIX); - jdb.commit_batch(3, &keccak(b"3"), Some((2, keccak(b"2")))).unwrap(); + commit_batch(&mut jdb, 3, &keccak(b"3"), Some((2, keccak(b"2")))).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); jdb.remove(&foo, EMPTY_PREFIX); - jdb.commit_batch(4, &keccak(b"4"), Some((3, keccak(b"3")))).unwrap(); - jdb.commit_batch(5, &keccak(b"5"), Some((4, keccak(b"4")))).unwrap(); + commit_batch(&mut jdb, 4, &keccak(b"4"), Some((3, keccak(b"3")))).unwrap(); + commit_batch(&mut jdb, 5, &keccak(b"5"), Some((4, keccak(b"4")))).unwrap(); } } @@ -423,19 +423,19 @@ mod tests { // history is 1 let foo = jdb.insert(EMPTY_PREFIX, b"foo"); let bar = jdb.insert(EMPTY_PREFIX, b"bar"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); jdb.remove(&foo, EMPTY_PREFIX); let baz = jdb.insert(EMPTY_PREFIX, b"baz"); - jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); jdb.remove(&bar, EMPTY_PREFIX); - jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); (foo, bar, baz) }; { let mut jdb = ArchiveDB::new(shared_db, None); - jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); } } @@ -447,7 +447,7 @@ mod tests { let key = { let mut jdb = ArchiveDB::new(shared_db.clone(), None); let key = jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); key }; @@ -462,11 +462,11 @@ mod tests { fn inject() { let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None); let key = jdb.insert(EMPTY_PREFIX, b"dog"); - jdb.inject_batch().unwrap(); + inject_batch(&mut jdb).unwrap(); assert_eq!(jdb.get(&key, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"dog")); jdb.remove(&key, EMPTY_PREFIX); - jdb.inject_batch().unwrap(); + inject_batch(&mut jdb).unwrap(); assert!(jdb.get(&key, EMPTY_PREFIX).is_none()); } diff --git a/util/journaldb/src/earlymergedb.rs b/util/journaldb/src/earlymergedb.rs index 0679bd5908d..65b37c95ca2 100644 --- a/util/journaldb/src/earlymergedb.rs +++ b/util/journaldb/src/earlymergedb.rs @@ -527,8 +527,7 @@ mod tests { use keccak::keccak; use hash_db::{HashDB, EMPTY_PREFIX}; use super::*; - use super::super::traits::JournalDB; - use kvdb_memorydb; + use {kvdb_memorydb, inject_batch, commit_batch}; #[test] fn insert_same_in_fork() { @@ -536,25 +535,25 @@ mod tests { let mut jdb = new_db(); let x = jdb.insert(EMPTY_PREFIX, b"X"); - jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(3, &keccak(b"1002a"), Some((1, keccak(b"1")))).unwrap(); + commit_batch(&mut jdb, 3, &keccak(b"1002a"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(4, &keccak(b"1003a"), Some((2, 
keccak(b"2")))).unwrap(); + commit_batch(&mut jdb, 4, &keccak(b"1003a"), Some((2, keccak(b"2")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&x, EMPTY_PREFIX); - jdb.commit_batch(3, &keccak(b"1002b"), Some((1, keccak(b"1")))).unwrap(); + commit_batch(&mut jdb, 3, &keccak(b"1002b"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); let x = jdb.insert(EMPTY_PREFIX, b"X"); - jdb.commit_batch(4, &keccak(b"1003b"), Some((2, keccak(b"2")))).unwrap(); + commit_batch(&mut jdb, 4, &keccak(b"1003b"), Some((2, keccak(b"2")))).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(5, &keccak(b"1004a"), Some((3, keccak(b"1002a")))).unwrap(); + commit_batch(&mut jdb, 5, &keccak(b"1004a"), Some((3, keccak(b"1002a")))).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(6, &keccak(b"1005a"), Some((4, keccak(b"1003a")))).unwrap(); + commit_batch(&mut jdb, 6, &keccak(b"1005a"), Some((4, keccak(b"1003a")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&x, EMPTY_PREFIX)); @@ -564,17 +563,17 @@ mod tests { fn insert_older_era() { let mut jdb = new_db(); let foo = jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(0, &keccak(b"0a"), None).unwrap(); + commit_batch(&mut jdb, 0, &keccak(b"0a"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); let bar = jdb.insert(EMPTY_PREFIX, b"bar"); - jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0a")))).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1"), Some((0, keccak(b"0a")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&bar, EMPTY_PREFIX); - jdb.commit_batch(0, &keccak(b"0b"), None).unwrap(); + commit_batch(&mut jdb, 0, &keccak(b"0b"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(jdb.contains(&bar, EMPTY_PREFIX)); @@ -585,20 +584,20 @@ mod tests { // history is 3 let mut jdb = new_db(); let h = jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&h, EMPTY_PREFIX)); jdb.remove(&h, EMPTY_PREFIX); - jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&h, EMPTY_PREFIX)); - jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&h, EMPTY_PREFIX)); - jdb.commit_batch(3, &keccak(b"3"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 3, &keccak(b"3"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&h, EMPTY_PREFIX)); - jdb.commit_batch(4, &keccak(b"4"), Some((1, keccak(b"1")))).unwrap(); + commit_batch(&mut jdb, 4, &keccak(b"4"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(!jdb.contains(&h, EMPTY_PREFIX)); } @@ -610,7 +609,7 @@ mod tests { let foo = jdb.insert(EMPTY_PREFIX, b"foo"); let bar = jdb.insert(EMPTY_PREFIX, b"bar"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(jdb.contains(&bar, EMPTY_PREFIX)); @@ -618,7 +617,7 @@ mod tests { 
jdb.remove(&foo, EMPTY_PREFIX); jdb.remove(&bar, EMPTY_PREFIX); let baz = jdb.insert(EMPTY_PREFIX, b"baz"); - jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(jdb.contains(&bar, EMPTY_PREFIX)); @@ -626,20 +625,20 @@ mod tests { let foo = jdb.insert(EMPTY_PREFIX, b"foo"); jdb.remove(&baz, EMPTY_PREFIX); - jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(!jdb.contains(&bar, EMPTY_PREFIX)); assert!(jdb.contains(&baz, EMPTY_PREFIX)); jdb.remove(&foo, EMPTY_PREFIX); - jdb.commit_batch(3, &keccak(b"3"), Some((2, keccak(b"2")))).unwrap(); + commit_batch(&mut jdb, 3, &keccak(b"3"), Some((2, keccak(b"2")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(!jdb.contains(&bar, EMPTY_PREFIX)); assert!(!jdb.contains(&baz, EMPTY_PREFIX)); - jdb.commit_batch(4, &keccak(b"4"), Some((3, keccak(b"3")))).unwrap(); + commit_batch(&mut jdb, 4, &keccak(b"4"), Some((3, keccak(b"3")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(!jdb.contains(&foo, EMPTY_PREFIX)); assert!(!jdb.contains(&bar, EMPTY_PREFIX)); @@ -653,25 +652,25 @@ mod tests { let foo = jdb.insert(EMPTY_PREFIX, b"foo"); let bar = jdb.insert(EMPTY_PREFIX, b"bar"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(jdb.contains(&bar, EMPTY_PREFIX)); jdb.remove(&foo, EMPTY_PREFIX); let baz = jdb.insert(EMPTY_PREFIX, b"baz"); - jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&bar, EMPTY_PREFIX); - jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(jdb.contains(&bar, EMPTY_PREFIX)); assert!(jdb.contains(&baz, EMPTY_PREFIX)); - jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(!jdb.contains(&baz, EMPTY_PREFIX)); @@ -684,19 +683,19 @@ mod tests { let mut jdb = new_db(); let foo = jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo, EMPTY_PREFIX)); jdb.remove(&foo, EMPTY_PREFIX); - jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(EMPTY_PREFIX, b"foo"); assert!(jdb.contains(&foo, EMPTY_PREFIX)); - jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo, EMPTY_PREFIX)); - jdb.commit_batch(3, &keccak(b"2"), Some((0, 
keccak(b"2")))).unwrap(); + commit_batch(&mut jdb, 3, &keccak(b"2"), Some((0, keccak(b"2")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo, EMPTY_PREFIX)); } @@ -705,24 +704,24 @@ mod tests { fn fork_same_key_one() { let mut jdb = new_db(); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); let foo = jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(1, &keccak(b"1c"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1c"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo, EMPTY_PREFIX)); - jdb.commit_batch(2, &keccak(b"2a"), Some((1, keccak(b"1a")))).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2a"), Some((1, keccak(b"1a")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo, EMPTY_PREFIX)); } @@ -730,24 +729,24 @@ mod tests { #[test] fn fork_same_key_other() { let mut jdb = new_db(); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); let foo = jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(1, &keccak(b"1c"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1c"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo, EMPTY_PREFIX)); - jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo, EMPTY_PREFIX)); } @@ -755,33 +754,33 @@ mod tests { #[test] fn fork_ins_del_ins() { let mut jdb = new_db(); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); let foo = jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo, EMPTY_PREFIX); - jdb.commit_batch(2, &keccak(b"2a"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2a"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo, EMPTY_PREFIX); - jdb.commit_batch(2, &keccak(b"2b"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2b"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(3, &keccak(b"3a"), Some((1, 
keccak(b"1")))).unwrap(); + commit_batch(&mut jdb, 3, &keccak(b"3a"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(3, &keccak(b"3b"), Some((1, keccak(b"1")))).unwrap(); + commit_batch(&mut jdb, 3, &keccak(b"3b"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(4, &keccak(b"4a"), Some((2, keccak(b"2a")))).unwrap(); + commit_batch(&mut jdb, 4, &keccak(b"4a"), Some((2, keccak(b"2a")))).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(5, &keccak(b"5a"), Some((3, keccak(b"3a")))).unwrap(); + commit_batch(&mut jdb, 5, &keccak(b"5a"), Some((3, keccak(b"3a")))).unwrap(); assert!(jdb.can_reconstruct_refs()); } @@ -800,7 +799,7 @@ mod tests { // history is 1 let foo = jdb.insert(EMPTY_PREFIX, b"foo"); jdb.emplace(bar.clone(), EMPTY_PREFIX, DBValue::from_slice(b"bar")); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); foo }; @@ -808,7 +807,7 @@ mod tests { { let mut jdb = EarlyMergeDB::new(shared_db.clone(), None); jdb.remove(&foo, EMPTY_PREFIX); - jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); } @@ -816,7 +815,7 @@ mod tests { let mut jdb = EarlyMergeDB::new(shared_db, None); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(jdb.contains(&bar, EMPTY_PREFIX)); - jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(!jdb.contains(&foo, EMPTY_PREFIX)); } @@ -830,22 +829,22 @@ mod tests { // history is 4 let foo = jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo, EMPTY_PREFIX); - jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo, EMPTY_PREFIX); - jdb.commit_batch(3, &keccak(b"3"), None).unwrap(); + commit_batch(&mut jdb, 3, &keccak(b"3"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(4, &keccak(b"4"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 4, &keccak(b"4"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); // expunge foo - jdb.commit_batch(5, &keccak(b"5"), Some((1, keccak(b"1")))).unwrap(); + commit_batch(&mut jdb, 5, &keccak(b"5"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); } @@ -856,43 +855,43 @@ mod tests { // history is 4 let foo = jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo, EMPTY_PREFIX); - jdb.commit_batch(1, &keccak(b"1a"), None).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1a"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo, EMPTY_PREFIX); - jdb.commit_batch(1, &keccak(b"1b"), None).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1b"), 
None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(2, &keccak(b"2a"), None).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2a"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(2, &keccak(b"2b"), None).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2b"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo, EMPTY_PREFIX); - jdb.commit_batch(3, &keccak(b"3a"), None).unwrap(); + commit_batch(&mut jdb, 3, &keccak(b"3a"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo, EMPTY_PREFIX); - jdb.commit_batch(3, &keccak(b"3b"), None).unwrap(); + commit_batch(&mut jdb, 3, &keccak(b"3b"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(4, &keccak(b"4a"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 4, &keccak(b"4a"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(4, &keccak(b"4b"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 4, &keccak(b"4b"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); // expunge foo - jdb.commit_batch(5, &keccak(b"5"), Some((1, keccak(b"1a")))).unwrap(); + commit_batch(&mut jdb, 5, &keccak(b"5"), Some((1, keccak(b"1a")))).unwrap(); assert!(jdb.can_reconstruct_refs()); } @@ -902,25 +901,25 @@ mod tests { // history is 1 let foo = jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); // foo is ancient history. jdb.remove(&foo, EMPTY_PREFIX); - jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(3, &keccak(b"3"), Some((2, keccak(b"2")))).unwrap(); // BROKEN + commit_batch(&mut jdb, 3, &keccak(b"3"), Some((2, keccak(b"2")))).unwrap(); // BROKEN assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo, EMPTY_PREFIX)); jdb.remove(&foo, EMPTY_PREFIX); - jdb.commit_batch(4, &keccak(b"4"), Some((3, keccak(b"3")))).unwrap(); + commit_batch(&mut jdb, 4, &keccak(b"4"), Some((3, keccak(b"3")))).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(5, &keccak(b"5"), Some((4, keccak(b"4")))).unwrap(); + commit_batch(&mut jdb, 5, &keccak(b"5"), Some((4, keccak(b"4")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(!jdb.contains(&foo, EMPTY_PREFIX)); } @@ -931,30 +930,30 @@ mod tests { // history is 4 let foo = jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(3, &keccak(b"3"), None).unwrap(); + commit_batch(&mut jdb, 3, &keccak(b"3"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(4, &keccak(b"4"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 4, &keccak(b"4"), Some((0, keccak(b"0")))).unwrap(); 
assert!(jdb.can_reconstruct_refs()); // foo is ancient history. jdb.insert(EMPTY_PREFIX, b"foo"); let bar = jdb.insert(EMPTY_PREFIX, b"bar"); - jdb.commit_batch(5, &keccak(b"5"), Some((1, keccak(b"1")))).unwrap(); + commit_batch(&mut jdb, 5, &keccak(b"5"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo, EMPTY_PREFIX); jdb.remove(&bar, EMPTY_PREFIX); - jdb.commit_batch(6, &keccak(b"6"), Some((2, keccak(b"2")))).unwrap(); + commit_batch(&mut jdb, 6, &keccak(b"6"), Some((2, keccak(b"2")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(EMPTY_PREFIX, b"foo"); jdb.insert(EMPTY_PREFIX, b"bar"); - jdb.commit_batch(7, &keccak(b"7"), Some((3, keccak(b"3")))).unwrap(); + commit_batch(&mut jdb, 7, &keccak(b"7"), Some((3, keccak(b"3")))).unwrap(); assert!(jdb.can_reconstruct_refs()); } @@ -969,20 +968,20 @@ mod tests { let mut jdb = EarlyMergeDB::new(shared_db.clone(), None); // history is 1 jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); // foo is ancient history. jdb.remove(&foo, EMPTY_PREFIX); - jdb.commit_batch(2, &keccak(b"2"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo, EMPTY_PREFIX)); jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(3, &keccak(b"3"), Some((1, keccak(b"1")))).unwrap(); + commit_batch(&mut jdb, 3, &keccak(b"3"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo, EMPTY_PREFIX)); @@ -991,7 +990,7 @@ mod tests { let mut jdb = EarlyMergeDB::new(shared_db.clone(), None); jdb.remove(&foo, EMPTY_PREFIX); - jdb.commit_batch(4, &keccak(b"4"), Some((2, keccak(b"2")))).unwrap(); + commit_batch(&mut jdb, 4, &keccak(b"4"), Some((2, keccak(b"2")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo, EMPTY_PREFIX)); @@ -999,7 +998,7 @@ mod tests { }; { let mut jdb = EarlyMergeDB::new(shared_db.clone(), None); - jdb.commit_batch(5, &keccak(b"5"), Some((3, keccak(b"3")))).unwrap(); + commit_batch(&mut jdb, 5, &keccak(b"5"), Some((3, keccak(b"3")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo, EMPTY_PREFIX)); @@ -1007,7 +1006,7 @@ mod tests { }; { let mut jdb = EarlyMergeDB::new(shared_db, None); - jdb.commit_batch(6, &keccak(b"6"), Some((4, keccak(b"4")))).unwrap(); + commit_batch(&mut jdb, 6, &keccak(b"6"), Some((4, keccak(b"4")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(!jdb.contains(&foo, EMPTY_PREFIX)); } @@ -1022,22 +1021,22 @@ mod tests { // history is 1 let foo = jdb.insert(EMPTY_PREFIX, b"foo"); let bar = jdb.insert(EMPTY_PREFIX, b"bar"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo, EMPTY_PREFIX); let baz = jdb.insert(EMPTY_PREFIX, b"baz"); - jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&bar, EMPTY_PREFIX); - jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1b"), Some((0, 
keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); (foo, bar, baz) }; { let mut jdb = EarlyMergeDB::new(shared_db, None); - jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(!jdb.contains(&baz, EMPTY_PREFIX)); @@ -1049,11 +1048,11 @@ fn inject() { let mut jdb = new_db(); let key = jdb.insert(EMPTY_PREFIX, b"dog"); - jdb.inject_batch().unwrap(); + inject_batch(&mut jdb).unwrap(); assert_eq!(jdb.get(&key, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"dog")); jdb.remove(&key, EMPTY_PREFIX); - jdb.inject_batch().unwrap(); + inject_batch(&mut jdb).unwrap(); assert!(jdb.get(&key, EMPTY_PREFIX).is_none()); } diff --git a/util/journaldb/src/lib.rs b/util/journaldb/src/lib.rs index 0cd362ce241..c63164fd14d 100644 --- a/util/journaldb/src/lib.rs +++ b/util/journaldb/src/lib.rs @@ -41,6 +41,7 @@ extern crate kvdb_memorydb; use std::{fmt, str, io}; use std::sync::Arc; +use ethereum_types::H256; /// Export the journaldb module. mod traits; @@ -177,6 +178,29 @@ pub fn new_memory_db() -> MemoryDB { MemoryDB::from_null_node(&rlp::NULL_RLP, rlp::NULL_RLP.as_ref().into()) } +#[cfg(test)] +/// Inject all changes in a single batch. +pub fn inject_batch(jdb: &mut dyn JournalDB) -> io::Result<u32> { + let mut batch = jdb.backing().transaction(); + let res = jdb.inject(&mut batch)?; + jdb.backing().write(batch).map(|_| res).map_err(Into::into) +} + +/// Commit all changes in a single batch +#[cfg(test)] +fn commit_batch(jdb: &mut dyn JournalDB, now: u64, id: &H256, end: Option<(u64, H256)>) -> io::Result<u32> { + let mut batch = jdb.backing().transaction(); + let mut ops = jdb.journal_under(&mut batch, now, id)?; + + if let Some((end_era, canon_id)) = end { + ops += jdb.mark_canonical(&mut batch, end_era, &canon_id)?; + } + + let result = jdb.backing().write(batch).map(|_| ops).map_err(Into::into); + jdb.flush(); + result +} + #[cfg(test)] mod tests { use super::Algorithm; diff --git a/util/journaldb/src/overlayrecentdb.rs b/util/journaldb/src/overlayrecentdb.rs index 3a1e7d293f4..0209f2c975b 100644 --- a/util/journaldb/src/overlayrecentdb.rs +++ b/util/journaldb/src/overlayrecentdb.rs @@ -494,7 +494,7 @@ mod tests { use keccak::keccak; use super::*; use hash_db::{HashDB, EMPTY_PREFIX}; - use {kvdb_memorydb, JournalDB}; + use {kvdb_memorydb, JournalDB, inject_batch, commit_batch}; fn new_db() -> OverlayRecentDB { let backing = Arc::new(kvdb_memorydb::create(0)); OverlayRecentDB::new(backing, None) } #[test] fn insert_same_in_fork() { // history is 1 let mut jdb = new_db(); let x = jdb.insert(EMPTY_PREFIX, b"X"); - jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(3, &keccak(b"1002a"), Some((1, keccak(b"1")))).unwrap(); + commit_batch(&mut jdb, 3, &keccak(b"1002a"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(4, &keccak(b"1003a"), Some((2, keccak(b"2")))).unwrap(); + commit_batch(&mut jdb, 4, &keccak(b"1003a"), Some((2, keccak(b"2")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&x, EMPTY_PREFIX); - jdb.commit_batch(3, &keccak(b"1002b"), Some((1, keccak(b"1")))).unwrap(); + commit_batch(&mut jdb, 3, &keccak(b"1002b"), Some((1, 
keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); let x = jdb.insert(EMPTY_PREFIX, b"X"); - jdb.commit_batch(4, &keccak(b"1003b"), Some((2, keccak(b"2")))).unwrap(); + commit_batch(&mut jdb, 4, &keccak(b"1003b"), Some((2, keccak(b"2")))).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(5, &keccak(b"1004a"), Some((3, keccak(b"1002a")))).unwrap(); + commit_batch(&mut jdb, 5, &keccak(b"1004a"), Some((3, keccak(b"1002a")))).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(6, &keccak(b"1005a"), Some((4, keccak(b"1003a")))).unwrap(); + commit_batch(&mut jdb, 6, &keccak(b"1005a"), Some((4, keccak(b"1003a")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&x, EMPTY_PREFIX)); @@ -536,20 +536,20 @@ mod tests { // history is 3 let mut jdb = new_db(); let h = jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&h, EMPTY_PREFIX)); jdb.remove(&h, EMPTY_PREFIX); - jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&h, EMPTY_PREFIX)); - jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&h, EMPTY_PREFIX)); - jdb.commit_batch(3, &keccak(b"3"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 3, &keccak(b"3"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&h, EMPTY_PREFIX)); - jdb.commit_batch(4, &keccak(b"4"), Some((1, keccak(b"1")))).unwrap(); + commit_batch(&mut jdb, 4, &keccak(b"4"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(!jdb.contains(&h, EMPTY_PREFIX)); } @@ -561,7 +561,7 @@ mod tests { let foo = jdb.insert(EMPTY_PREFIX, b"foo"); let bar = jdb.insert(EMPTY_PREFIX, b"bar"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(jdb.contains(&bar, EMPTY_PREFIX)); @@ -569,7 +569,7 @@ mod tests { jdb.remove(&foo, EMPTY_PREFIX); jdb.remove(&bar, EMPTY_PREFIX); let baz = jdb.insert(EMPTY_PREFIX, b"baz"); - jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(jdb.contains(&bar, EMPTY_PREFIX)); @@ -577,20 +577,20 @@ mod tests { let foo = jdb.insert(EMPTY_PREFIX, b"foo"); jdb.remove(&baz, EMPTY_PREFIX); - jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(!jdb.contains(&bar, EMPTY_PREFIX)); assert!(jdb.contains(&baz, EMPTY_PREFIX)); jdb.remove(&foo, EMPTY_PREFIX); - jdb.commit_batch(3, &keccak(b"3"), Some((2, keccak(b"2")))).unwrap(); + commit_batch(&mut jdb, 3, &keccak(b"3"), Some((2, keccak(b"2")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(!jdb.contains(&bar, EMPTY_PREFIX)); assert!(!jdb.contains(&baz, EMPTY_PREFIX)); - jdb.commit_batch(4, &keccak(b"4"), Some((3, keccak(b"3")))).unwrap(); + 
commit_batch(&mut jdb, 4, &keccak(b"4"), Some((3, keccak(b"3")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(!jdb.contains(&foo, EMPTY_PREFIX)); assert!(!jdb.contains(&bar, EMPTY_PREFIX)); @@ -604,25 +604,25 @@ mod tests { let foo = jdb.insert(EMPTY_PREFIX, b"foo"); let bar = jdb.insert(EMPTY_PREFIX, b"bar"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(jdb.contains(&bar, EMPTY_PREFIX)); jdb.remove(&foo, EMPTY_PREFIX); let baz = jdb.insert(EMPTY_PREFIX, b"baz"); - jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&bar, EMPTY_PREFIX); - jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(jdb.contains(&bar, EMPTY_PREFIX)); assert!(jdb.contains(&baz, EMPTY_PREFIX)); - jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(!jdb.contains(&baz, EMPTY_PREFIX)); @@ -635,19 +635,19 @@ mod tests { let mut jdb = new_db(); let foo = jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo, EMPTY_PREFIX)); jdb.remove(&foo, EMPTY_PREFIX); - jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(EMPTY_PREFIX, b"foo"); assert!(jdb.contains(&foo, EMPTY_PREFIX)); - jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo, EMPTY_PREFIX)); - jdb.commit_batch(3, &keccak(b"2"), Some((0, keccak(b"2")))).unwrap(); + commit_batch(&mut jdb, 3, &keccak(b"2"), Some((0, keccak(b"2")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo, EMPTY_PREFIX)); } @@ -655,24 +655,24 @@ mod tests { #[test] fn fork_same_key_one() { let mut jdb = new_db(); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); let foo = jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(1, &keccak(b"1c"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1c"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo, EMPTY_PREFIX)); - jdb.commit_batch(2, &keccak(b"2a"), Some((1, keccak(b"1a")))).unwrap(); + 
commit_batch(&mut jdb, 2, &keccak(b"2a"), Some((1, keccak(b"1a")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo, EMPTY_PREFIX)); } @@ -681,24 +681,24 @@ mod tests { fn fork_same_key_other() { let mut jdb = new_db(); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); let foo = jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(1, &keccak(b"1c"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1c"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo, EMPTY_PREFIX)); - jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo, EMPTY_PREFIX)); } @@ -707,33 +707,33 @@ mod tests { fn fork_ins_del_ins() { let mut jdb = new_db(); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); let foo = jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo, EMPTY_PREFIX); - jdb.commit_batch(2, &keccak(b"2a"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2a"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo, EMPTY_PREFIX); - jdb.commit_batch(2, &keccak(b"2b"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2b"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(3, &keccak(b"3a"), Some((1, keccak(b"1")))).unwrap(); + commit_batch(&mut jdb, 3, &keccak(b"3a"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(3, &keccak(b"3b"), Some((1, keccak(b"1")))).unwrap(); + commit_batch(&mut jdb, 3, &keccak(b"3b"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(4, &keccak(b"4a"), Some((2, keccak(b"2a")))).unwrap(); + commit_batch(&mut jdb, 4, &keccak(b"4a"), Some((2, keccak(b"2a")))).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(5, &keccak(b"5a"), Some((3, keccak(b"3a")))).unwrap(); + commit_batch(&mut jdb, 5, &keccak(b"5a"), Some((3, keccak(b"3a")))).unwrap(); assert!(jdb.can_reconstruct_refs()); } @@ -747,7 +747,7 @@ mod tests { // history is 1 let foo = jdb.insert(EMPTY_PREFIX, b"foo"); jdb.emplace(bar.clone(), EMPTY_PREFIX, DBValue::from_slice(b"bar")); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); foo }; @@ -755,7 +755,7 @@ mod tests { { let mut jdb = OverlayRecentDB::new(shared_db.clone(), None); jdb.remove(&foo, EMPTY_PREFIX); - jdb.commit_batch(1, &keccak(b"1"), Some((0, 
keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); } @@ -763,7 +763,7 @@ mod tests { let mut jdb = OverlayRecentDB::new(shared_db.clone(), None); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(jdb.contains(&bar, EMPTY_PREFIX)); - jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(!jdb.contains(&foo, EMPTY_PREFIX)); } @@ -776,22 +776,22 @@ mod tests { // history is 4 let foo = jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo, EMPTY_PREFIX); - jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo, EMPTY_PREFIX); - jdb.commit_batch(3, &keccak(b"3"), None).unwrap(); + commit_batch(&mut jdb, 3, &keccak(b"3"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(4, &keccak(b"4"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 4, &keccak(b"4"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); // expunge foo - jdb.commit_batch(5, &keccak(b"5"), Some((1, keccak(b"1")))).unwrap(); + commit_batch(&mut jdb, 5, &keccak(b"5"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); } @@ -802,43 +802,43 @@ mod tests { // history is 4 let foo = jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo, EMPTY_PREFIX); - jdb.commit_batch(1, &keccak(b"1a"), None).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1a"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo, EMPTY_PREFIX); - jdb.commit_batch(1, &keccak(b"1b"), None).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1b"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(2, &keccak(b"2a"), None).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2a"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(2, &keccak(b"2b"), None).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2b"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo, EMPTY_PREFIX); - jdb.commit_batch(3, &keccak(b"3a"), None).unwrap(); + commit_batch(&mut jdb, 3, &keccak(b"3a"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo, EMPTY_PREFIX); - jdb.commit_batch(3, &keccak(b"3b"), None).unwrap(); + commit_batch(&mut jdb, 3, &keccak(b"3b"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(4, &keccak(b"4a"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 4, &keccak(b"4a"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(4, &keccak(b"4b"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 4, &keccak(b"4b"), Some((0, keccak(b"0")))).unwrap(); 
assert!(jdb.can_reconstruct_refs()); // expunge foo - jdb.commit_batch(5, &keccak(b"5"), Some((1, keccak(b"1a")))).unwrap(); + commit_batch(&mut jdb, 5, &keccak(b"5"), Some((1, keccak(b"1a")))).unwrap(); assert!(jdb.can_reconstruct_refs()); } @@ -847,25 +847,25 @@ mod tests { let mut jdb = new_db(); let foo = jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); // foo is ancient history. jdb.remove(&foo, EMPTY_PREFIX); - jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(3, &keccak(b"3"), Some((2, keccak(b"2")))).unwrap(); // BROKEN + commit_batch(&mut jdb, 3, &keccak(b"3"), Some((2, keccak(b"2")))).unwrap(); // BROKEN assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo, EMPTY_PREFIX)); jdb.remove(&foo, EMPTY_PREFIX); - jdb.commit_batch(4, &keccak(b"4"), Some((3, keccak(b"3")))).unwrap(); + commit_batch(&mut jdb, 4, &keccak(b"4"), Some((3, keccak(b"3")))).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(5, &keccak(b"5"), Some((4, keccak(b"4")))).unwrap(); + commit_batch(&mut jdb, 5, &keccak(b"5"), Some((4, keccak(b"4")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(!jdb.contains(&foo, EMPTY_PREFIX)); } @@ -875,30 +875,30 @@ mod tests { let mut jdb = new_db(); // history is 4 let foo = jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(3, &keccak(b"3"), None).unwrap(); + commit_batch(&mut jdb, 3, &keccak(b"3"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(4, &keccak(b"4"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 4, &keccak(b"4"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); // foo is ancient history. 
jdb.insert(EMPTY_PREFIX, b"foo"); let bar = jdb.insert(EMPTY_PREFIX, b"bar"); - jdb.commit_batch(5, &keccak(b"5"), Some((1, keccak(b"1")))).unwrap(); + commit_batch(&mut jdb, 5, &keccak(b"5"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo, EMPTY_PREFIX); jdb.remove(&bar, EMPTY_PREFIX); - jdb.commit_batch(6, &keccak(b"6"), Some((2, keccak(b"2")))).unwrap(); + commit_batch(&mut jdb, 6, &keccak(b"6"), Some((2, keccak(b"2")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.insert(EMPTY_PREFIX, b"foo"); jdb.insert(EMPTY_PREFIX, b"bar"); - jdb.commit_batch(7, &keccak(b"7"), Some((3, keccak(b"3")))).unwrap(); + commit_batch(&mut jdb, 7, &keccak(b"7"), Some((3, keccak(b"3")))).unwrap(); assert!(jdb.can_reconstruct_refs()); } @@ -913,20 +913,20 @@ mod tests { let mut jdb = OverlayRecentDB::new(shared_db.clone(), None); // history is 1 jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); // foo is ancient history. jdb.remove(&foo, EMPTY_PREFIX); - jdb.commit_batch(2, &keccak(b"2"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo, EMPTY_PREFIX)); jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(3, &keccak(b"3"), Some((1, keccak(b"1")))).unwrap(); + commit_batch(&mut jdb, 3, &keccak(b"3"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo, EMPTY_PREFIX)); @@ -935,7 +935,7 @@ mod tests { let mut jdb = OverlayRecentDB::new(shared_db.clone(), None); jdb.remove(&foo, EMPTY_PREFIX); - jdb.commit_batch(4, &keccak(b"4"), Some((2, keccak(b"2")))).unwrap(); + commit_batch(&mut jdb, 4, &keccak(b"4"), Some((2, keccak(b"2")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo, EMPTY_PREFIX)); @@ -943,7 +943,7 @@ mod tests { }; { let mut jdb = OverlayRecentDB::new(shared_db.clone(), None); - jdb.commit_batch(5, &keccak(b"5"), Some((3, keccak(b"3")))).unwrap(); + commit_batch(&mut jdb, 5, &keccak(b"5"), Some((3, keccak(b"3")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo, EMPTY_PREFIX)); @@ -951,7 +951,7 @@ mod tests { }; { let mut jdb = OverlayRecentDB::new(shared_db, None); - jdb.commit_batch(6, &keccak(b"6"), Some((4, keccak(b"4")))).unwrap(); + commit_batch(&mut jdb, 6, &keccak(b"6"), Some((4, keccak(b"4")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(!jdb.contains(&foo, EMPTY_PREFIX)); } @@ -966,22 +966,22 @@ mod tests { // history is 1 let foo = jdb.insert(EMPTY_PREFIX, b"foo"); let bar = jdb.insert(EMPTY_PREFIX, b"bar"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo, EMPTY_PREFIX); let baz = jdb.insert(EMPTY_PREFIX, b"baz"); - jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&bar, EMPTY_PREFIX); - jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.can_reconstruct_refs()); (foo, 
bar, baz) }; { let mut jdb = OverlayRecentDB::new(shared_db, None); - jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(!jdb.contains(&baz, EMPTY_PREFIX)); @@ -993,17 +993,17 @@ mod tests { fn insert_older_era() { let mut jdb = new_db(); let foo = jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(0, &keccak(b"0a"), None).unwrap(); + commit_batch(&mut jdb, 0, &keccak(b"0a"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); let bar = jdb.insert(EMPTY_PREFIX, b"bar"); - jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0a")))).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1"), Some((0, keccak(b"0a")))).unwrap(); assert!(jdb.can_reconstruct_refs()); jdb.remove(&bar, EMPTY_PREFIX); - jdb.commit_batch(0, &keccak(b"0b"), None).unwrap(); + commit_batch(&mut jdb, 0, &keccak(b"0b"), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(jdb.contains(&bar, EMPTY_PREFIX)); @@ -1013,11 +1013,11 @@ mod tests { fn inject() { let mut jdb = new_db(); let key = jdb.insert(EMPTY_PREFIX, b"dog"); - jdb.inject_batch().unwrap(); + inject_batch(&mut jdb).unwrap(); assert_eq!(jdb.get(&key, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"dog")); jdb.remove(&key, EMPTY_PREFIX); - jdb.inject_batch().unwrap(); + inject_batch(&mut jdb).unwrap(); assert!(jdb.get(&key, EMPTY_PREFIX).is_none()); } diff --git a/util/journaldb/src/refcounteddb.rs b/util/journaldb/src/refcounteddb.rs index 4aef98c302c..bb312b241bd 100644 --- a/util/journaldb/src/refcounteddb.rs +++ b/util/journaldb/src/refcounteddb.rs @@ -221,7 +221,7 @@ mod tests { use keccak::keccak; use hash_db::{HashDB, EMPTY_PREFIX}; use super::*; - use {JournalDB, kvdb_memorydb}; + use {JournalDB, kvdb_memorydb, inject_batch, commit_batch}; fn new_db() -> RefCountedDB { let backing = Arc::new(kvdb_memorydb::create(0)); @@ -233,16 +233,16 @@ mod tests { // history is 3 let mut jdb = new_db(); let h = jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.contains(&h, EMPTY_PREFIX)); jdb.remove(&h, EMPTY_PREFIX); - jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1"), None).unwrap(); assert!(jdb.contains(&h, EMPTY_PREFIX)); - jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2"), None).unwrap(); assert!(jdb.contains(&h, EMPTY_PREFIX)); - jdb.commit_batch(3, &keccak(b"3"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 3, &keccak(b"3"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.contains(&h, EMPTY_PREFIX)); - jdb.commit_batch(4, &keccak(b"4"), Some((1, keccak(b"1")))).unwrap(); + commit_batch(&mut jdb, 4, &keccak(b"4"), Some((1, keccak(b"1")))).unwrap(); assert!(!jdb.contains(&h, EMPTY_PREFIX)); } @@ -252,16 +252,16 @@ mod tests { let mut jdb = new_db(); assert_eq!(jdb.latest_era(), None); let h = jdb.insert(EMPTY_PREFIX, b"foo"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert_eq!(jdb.latest_era(), Some(0)); jdb.remove(&h, EMPTY_PREFIX); - jdb.commit_batch(1, &keccak(b"1"), None).unwrap(); + commit_batch(&mut jdb, 
1, &keccak(b"1"), None).unwrap(); assert_eq!(jdb.latest_era(), Some(1)); - jdb.commit_batch(2, &keccak(b"2"), None).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2"), None).unwrap(); assert_eq!(jdb.latest_era(), Some(2)); - jdb.commit_batch(3, &keccak(b"3"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 3, &keccak(b"3"), Some((0, keccak(b"0")))).unwrap(); assert_eq!(jdb.latest_era(), Some(3)); - jdb.commit_batch(4, &keccak(b"4"), Some((1, keccak(b"1")))).unwrap(); + commit_batch(&mut jdb, 4, &keccak(b"4"), Some((1, keccak(b"1")))).unwrap(); assert_eq!(jdb.latest_era(), Some(4)); } @@ -272,32 +272,32 @@ mod tests { let foo = jdb.insert(EMPTY_PREFIX, b"foo"); let bar = jdb.insert(EMPTY_PREFIX, b"bar"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(jdb.contains(&bar, EMPTY_PREFIX)); jdb.remove(&foo, EMPTY_PREFIX); jdb.remove(&bar, EMPTY_PREFIX); let baz = jdb.insert(EMPTY_PREFIX, b"baz"); - jdb.commit_batch(1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(jdb.contains(&bar, EMPTY_PREFIX)); assert!(jdb.contains(&baz, EMPTY_PREFIX)); let foo = jdb.insert(EMPTY_PREFIX, b"foo"); jdb.remove(&baz, EMPTY_PREFIX); - jdb.commit_batch(2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(!jdb.contains(&bar, EMPTY_PREFIX)); assert!(jdb.contains(&baz, EMPTY_PREFIX)); jdb.remove(&foo, EMPTY_PREFIX); - jdb.commit_batch(3, &keccak(b"3"), Some((2, keccak(b"2")))).unwrap(); + commit_batch(&mut jdb, 3, &keccak(b"3"), Some((2, keccak(b"2")))).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(!jdb.contains(&bar, EMPTY_PREFIX)); assert!(!jdb.contains(&baz, EMPTY_PREFIX)); - jdb.commit_batch(4, &keccak(b"4"), Some((3, keccak(b"3")))).unwrap(); + commit_batch(&mut jdb, 4, &keccak(b"4"), Some((3, keccak(b"3")))).unwrap(); assert!(!jdb.contains(&foo, EMPTY_PREFIX)); assert!(!jdb.contains(&bar, EMPTY_PREFIX)); assert!(!jdb.contains(&baz, EMPTY_PREFIX)); @@ -310,22 +310,22 @@ mod tests { let foo = jdb.insert(EMPTY_PREFIX, b"foo"); let bar = jdb.insert(EMPTY_PREFIX, b"bar"); - jdb.commit_batch(0, &keccak(b"0"), None).unwrap(); + commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(jdb.contains(&bar, EMPTY_PREFIX)); jdb.remove(&foo, EMPTY_PREFIX); let baz = jdb.insert(EMPTY_PREFIX, b"baz"); - jdb.commit_batch(1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); jdb.remove(&bar, EMPTY_PREFIX); - jdb.commit_batch(1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); + commit_batch(&mut jdb, 1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(jdb.contains(&bar, EMPTY_PREFIX)); assert!(jdb.contains(&baz, EMPTY_PREFIX)); - jdb.commit_batch(2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap(); + commit_batch(&mut jdb, 2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(!jdb.contains(&baz, EMPTY_PREFIX)); assert!(!jdb.contains(&bar, EMPTY_PREFIX)); @@ -335,11 +335,11 @@ mod tests { fn inject() { let mut jdb = new_db(); let key = jdb.insert(EMPTY_PREFIX, b"dog"); - jdb.inject_batch().unwrap(); 
+ inject_batch(&mut jdb).unwrap(); assert_eq!(jdb.get(&key, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"dog")); jdb.remove(&key, EMPTY_PREFIX); - jdb.inject_batch().unwrap(); + inject_batch(&mut jdb).unwrap(); assert!(jdb.get(&key, EMPTY_PREFIX).is_none()); } diff --git a/util/journaldb/src/traits.rs b/util/journaldb/src/traits.rs index 5114074a544..10a56df22a8 100644 --- a/util/journaldb/src/traits.rs +++ b/util/journaldb/src/traits.rs @@ -94,27 +94,4 @@ pub trait JournalDB: KeyedHashDB { /// Consolidate all the insertions and deletions in the given memory overlay. fn consolidate(&mut self, overlay: super::MemoryDB); - - /// Commit all changes in a single batch - #[cfg(test)] - fn commit_batch(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> io::Result { - let mut batch = self.backing().transaction(); - let mut ops = self.journal_under(&mut batch, now, id)?; - - if let Some((end_era, canon_id)) = end { - ops += self.mark_canonical(&mut batch, end_era, &canon_id)?; - } - - let result = self.backing().write(batch).map(|_| ops).map_err(Into::into); - self.flush(); - result - } - - /// Inject all changes in a single batch. - #[cfg(test)] - fn inject_batch(&mut self) -> io::Result { - let mut batch = self.backing().transaction(); - let res = self.inject(&mut batch)?; - self.backing().write(batch).map(|_| res).map_err(Into::into) - } } From 4af8cabf1d88ad22cc5649c7335cecf3d379c7ce Mon Sep 17 00:00:00 2001 From: debris Date: Tue, 30 Jul 2019 19:58:17 +0200 Subject: [PATCH 2/7] get rid of redundant KeyedHashDB trait --- ethcore/account-state/src/backend.rs | 19 +------------ util/journaldb/src/archivedb.rs | 39 ++++++++++++-------------- util/journaldb/src/as_hash_db_impls.rs | 21 -------------- util/journaldb/src/earlymergedb.rs | 38 ++++++++++++------------- util/journaldb/src/lib.rs | 7 +---- util/journaldb/src/overlaydb.rs | 6 +--- util/journaldb/src/overlayrecentdb.rs | 38 ++++++++++++------------- util/journaldb/src/refcounteddb.rs | 8 +++--- util/journaldb/src/traits.rs | 21 ++++---------- 9 files changed, 66 insertions(+), 131 deletions(-) diff --git a/ethcore/account-state/src/backend.rs b/ethcore/account-state/src/backend.rs index 5aa675ff929..b51705235e0 100644 --- a/ethcore/account-state/src/backend.rs +++ b/ethcore/account-state/src/backend.rs @@ -21,7 +21,7 @@ //! should become general over time to the point where not even a //! merkle trie is strictly necessary. 
-use std::collections::{HashMap, HashSet}; +use std::collections::HashSet; use std::sync::Arc; use ethereum_types::{Address, H256}; @@ -29,7 +29,6 @@ use hash_db::{AsHashDB, EMPTY_PREFIX, HashDB, Prefix}; use kvdb::DBValue; use memory_db::{HashKey, MemoryDB}; use parking_lot::Mutex; -use journaldb::AsKeyedHashDB; use keccak_hasher::KeccakHasher; use crate::account::Account; @@ -90,10 +89,6 @@ impl ProofCheck { } } -impl journaldb::KeyedHashDB for ProofCheck { - fn keys(&self) -> HashMap { self.0.keys() } -} - impl HashDB for ProofCheck { fn get(&self, key: &H256, prefix: Prefix) -> Option { self.0.get(key, prefix) @@ -146,23 +141,11 @@ pub struct Proving { proof: Mutex>, } -impl AsKeyedHashDB for Proving { - fn as_keyed_hash_db(&self) -> &dyn journaldb::KeyedHashDB { self } -} - impl + Send + Sync> AsHashDB for Proving { fn as_hash_db(&self) -> &dyn HashDB { self } fn as_hash_db_mut(&mut self) -> &mut dyn HashDB { self } } -impl journaldb::KeyedHashDB for Proving { - fn keys(&self) -> HashMap { - let mut keys = self.base.as_keyed_hash_db().keys(); - keys.extend(self.changed.keys()); - keys - } -} - impl + Send + Sync> HashDB for Proving { fn get(&self, key: &H256, prefix: Prefix) -> Option { match self.base.as_hash_db().get(key, prefix) { diff --git a/util/journaldb/src/archivedb.rs b/util/journaldb/src/archivedb.rs index 9d6d2453c45..57e0b5cf6d0 100644 --- a/util/journaldb/src/archivedb.rs +++ b/util/journaldb/src/archivedb.rs @@ -92,28 +92,7 @@ impl HashDB for ArchiveDB { } } -impl ::traits::KeyedHashDB for ArchiveDB { - fn keys(&self) -> HashMap { - let mut ret: HashMap = self.backing.iter(self.column) - .map(|(key, _)| (H256::from_slice(&*key), 1)) - .collect(); - - for (key, refs) in self.overlay.keys() { - match ret.entry(key) { - Entry::Occupied(mut entry) => { - *entry.get_mut() += refs; - }, - Entry::Vacant(entry) => { - entry.insert(refs); - } - } - } - ret - } -} - impl JournalDB for ArchiveDB { - fn boxed_clone(&self) -> Box { Box::new(ArchiveDB { overlay: self.overlay.clone(), @@ -200,6 +179,24 @@ impl JournalDB for ArchiveDB { fn consolidate(&mut self, with: super::MemoryDB) { self.overlay.consolidate(with); } + + fn keys(&self) -> HashMap { + let mut ret: HashMap = self.backing.iter(self.column) + .map(|(key, _)| (H256::from_slice(&*key), 1)) + .collect(); + + for (key, refs) in self.overlay.keys() { + match ret.entry(key) { + Entry::Occupied(mut entry) => { + *entry.get_mut() += refs; + }, + Entry::Vacant(entry) => { + entry.insert(refs); + } + } + } + ret + } } #[cfg(test)] diff --git a/util/journaldb/src/as_hash_db_impls.rs b/util/journaldb/src/as_hash_db_impls.rs index cafed203261..d46e7deb108 100644 --- a/util/journaldb/src/as_hash_db_impls.rs +++ b/util/journaldb/src/as_hash_db_impls.rs @@ -23,7 +23,6 @@ use overlayrecentdb::OverlayRecentDB; use refcounteddb::RefCountedDB; use overlaydb::OverlayDB; use kvdb::DBValue; -use crate::{KeyedHashDB, AsKeyedHashDB}; impl AsHashDB for ArchiveDB { fn as_hash_db(&self) -> &dyn HashDB { self } @@ -49,23 +48,3 @@ impl AsHashDB for OverlayDB { fn as_hash_db(&self) -> &dyn HashDB { self } fn as_hash_db_mut(&mut self) -> &mut dyn HashDB { self } } - -impl AsKeyedHashDB for ArchiveDB { - fn as_keyed_hash_db(&self) -> &dyn KeyedHashDB { self } -} - -impl AsKeyedHashDB for EarlyMergeDB { - fn as_keyed_hash_db(&self) -> &dyn KeyedHashDB { self } -} - -impl AsKeyedHashDB for OverlayRecentDB { - fn as_keyed_hash_db(&self) -> &dyn KeyedHashDB { self } -} - -impl AsKeyedHashDB for RefCountedDB { - fn as_keyed_hash_db(&self) -> &dyn 
KeyedHashDB { self } -} - -impl AsKeyedHashDB for OverlayDB { - fn as_keyed_hash_db(&self) -> &dyn KeyedHashDB { self } -} diff --git a/util/journaldb/src/earlymergedb.rs b/util/journaldb/src/earlymergedb.rs index 65b37c95ca2..78abe46759f 100644 --- a/util/journaldb/src/earlymergedb.rs +++ b/util/journaldb/src/earlymergedb.rs @@ -311,26 +311,6 @@ impl HashDB for EarlyMergeDB { } } -impl ::traits::KeyedHashDB for EarlyMergeDB { - fn keys(&self) -> HashMap { - let mut ret: HashMap = self.backing.iter(self.column) - .map(|(key, _)| (H256::from_slice(&*key), 1)) - .collect(); - - for (key, refs) in self.overlay.keys() { - match ret.entry(key) { - Entry::Occupied(mut entry) => { - *entry.get_mut() += refs; - }, - Entry::Vacant(entry) => { - entry.insert(refs); - } - } - } - ret - } -} - impl JournalDB for EarlyMergeDB { fn boxed_clone(&self) -> Box { Box::new(EarlyMergeDB { @@ -519,6 +499,24 @@ impl JournalDB for EarlyMergeDB { fn consolidate(&mut self, with: super::MemoryDB) { self.overlay.consolidate(with); } + + fn keys(&self) -> HashMap { + let mut ret: HashMap = self.backing.iter(self.column) + .map(|(key, _)| (H256::from_slice(&*key), 1)) + .collect(); + + for (key, refs) in self.overlay.keys() { + match ret.entry(key) { + Entry::Occupied(mut entry) => { + *entry.get_mut() += refs; + }, + Entry::Vacant(entry) => { + entry.insert(refs); + } + } + } + ret + } } #[cfg(test)] diff --git a/util/journaldb/src/lib.rs b/util/journaldb/src/lib.rs index c63164fd14d..b1eb59841d9 100644 --- a/util/journaldb/src/lib.rs +++ b/util/journaldb/src/lib.rs @@ -41,6 +41,7 @@ extern crate kvdb_memorydb; use std::{fmt, str, io}; use std::sync::Arc; +#[cfg(test)] use ethereum_types::H256; /// Export the journaldb module. @@ -57,12 +58,6 @@ pub mod overlaydb; /// Export the `JournalDB` trait. 
pub use self::traits::JournalDB; -/// Export keyed hash trait -pub use self::traits::KeyedHashDB; -/// Export as keyed hash trait -pub use self::traits::AsKeyedHashDB; - - /// Alias to ethereum MemoryDB type MemoryDB = memory_db::MemoryDB< keccak_hasher::KeccakHasher, diff --git a/util/journaldb/src/overlaydb.rs b/util/journaldb/src/overlaydb.rs index 06b0ebed927..548d1dc9903 100644 --- a/util/journaldb/src/overlaydb.rs +++ b/util/journaldb/src/overlaydb.rs @@ -153,10 +153,7 @@ impl OverlayDB { } } -} - -impl crate::KeyedHashDB for OverlayDB { - fn keys(&self) -> HashMap { + pub fn keys(&self) -> HashMap { let mut ret: HashMap = self.backing.iter(self.column) .map(|(key, _)| { let h = H256::from_slice(&*key); @@ -177,7 +174,6 @@ impl crate::KeyedHashDB for OverlayDB { } ret } - } impl HashDB for OverlayDB { diff --git a/util/journaldb/src/overlayrecentdb.rs b/util/journaldb/src/overlayrecentdb.rs index 0209f2c975b..4742afe68b5 100644 --- a/util/journaldb/src/overlayrecentdb.rs +++ b/util/journaldb/src/overlayrecentdb.rs @@ -238,26 +238,6 @@ fn to_short_key(key: &H256) -> H256 { k } -impl ::traits::KeyedHashDB for OverlayRecentDB { - fn keys(&self) -> HashMap { - let mut ret: HashMap = self.backing.iter(self.column) - .map(|(key, _)| (H256::from_slice(&*key), 1)) - .collect(); - - for (key, refs) in self.transaction_overlay.keys() { - match ret.entry(key) { - Entry::Occupied(mut entry) => { - *entry.get_mut() += refs; - }, - Entry::Vacant(entry) => { - entry.insert(refs); - } - } - } - ret - } -} - impl JournalDB for OverlayRecentDB { fn boxed_clone(&self) -> Box { @@ -455,6 +435,24 @@ impl JournalDB for OverlayRecentDB { fn consolidate(&mut self, with: super::MemoryDB) { self.transaction_overlay.consolidate(with); } + + fn keys(&self) -> HashMap { + let mut ret: HashMap = self.backing.iter(self.column) + .map(|(key, _)| (H256::from_slice(&*key), 1)) + .collect(); + + for (key, refs) in self.transaction_overlay.keys() { + match ret.entry(key) { + Entry::Occupied(mut entry) => { + *entry.get_mut() += refs; + }, + Entry::Vacant(entry) => { + entry.insert(refs); + } + } + } + ret + } } impl HashDB for OverlayRecentDB { diff --git a/util/journaldb/src/refcounteddb.rs b/util/journaldb/src/refcounteddb.rs index bb312b241bd..ce8d2651b7c 100644 --- a/util/journaldb/src/refcounteddb.rs +++ b/util/journaldb/src/refcounteddb.rs @@ -87,10 +87,6 @@ impl HashDB for RefCountedDB { fn remove(&mut self, key: &H256, _prefix: Prefix) { self.removes.push(key.clone()); } } -impl ::traits::KeyedHashDB for RefCountedDB { - fn keys(&self) -> HashMap { self.forward.keys() } -} - impl JournalDB for RefCountedDB { fn boxed_clone(&self) -> Box { Box::new(RefCountedDB { @@ -213,6 +209,10 @@ impl JournalDB for RefCountedDB { } } } + + fn keys(&self) -> HashMap { + self.forward.keys() + } } #[cfg(test)] diff --git a/util/journaldb/src/traits.rs b/util/journaldb/src/traits.rs index 10a56df22a8..9a426cc0ba5 100644 --- a/util/journaldb/src/traits.rs +++ b/util/journaldb/src/traits.rs @@ -21,28 +21,14 @@ use std::sync::Arc; use bytes::Bytes; use ethereum_types::H256; -use hash_db::{HashDB, AsHashDB}; +use hash_db::HashDB; use keccak_hasher::KeccakHasher; use kvdb::{self, DBTransaction, DBValue}; use std::collections::HashMap; - -/// expose keys of a hashDB for debugging or tests (slow). -pub trait KeyedHashDB: HashDB { - /// Primarily use for tests, highly inefficient. - fn keys(&self) -> HashMap; -} - -/// Upcast to `KeyedHashDB` -pub trait AsKeyedHashDB: AsHashDB { - /// Perform upcast to KeyedHashDB. 
- fn as_keyed_hash_db(&self) -> &dyn KeyedHashDB; -} - /// A `HashDB` which can manage a short-term journal potentially containing many forks of mutually /// exclusive actions. -pub trait JournalDB: KeyedHashDB { - +pub trait JournalDB: HashDB { /// Return a copy of ourself, in a box. fn boxed_clone(&self) -> Box; @@ -94,4 +80,7 @@ pub trait JournalDB: KeyedHashDB { /// Consolidate all the insertions and deletions in the given memory overlay. fn consolidate(&mut self, overlay: super::MemoryDB); + + /// Primarily use for tests, highly inefficient. + fn keys(&self) -> HashMap; } From 3aeaf0b68f817e96fc99e967ded3eba563141543 Mon Sep 17 00:00:00 2001 From: debris Date: Tue, 30 Jul 2019 20:12:49 +0200 Subject: [PATCH 3/7] journaldb edition 2018 --- util/journaldb/Cargo.toml | 1 + util/journaldb/src/archivedb.rs | 10 +++++----- util/journaldb/src/as_hash_db_impls.rs | 10 +++++----- util/journaldb/src/earlymergedb.rs | 13 +++++++------ util/journaldb/src/lib.rs | 19 ------------------- util/journaldb/src/overlaydb.rs | 11 ++++++++--- util/journaldb/src/overlayrecentdb.rs | 18 +++++++++++------- util/journaldb/src/refcounteddb.rs | 13 +++++++------ 8 files changed, 44 insertions(+), 51 deletions(-) diff --git a/util/journaldb/Cargo.toml b/util/journaldb/Cargo.toml index eada2b21311..1d0e0a43e4e 100644 --- a/util/journaldb/Cargo.toml +++ b/util/journaldb/Cargo.toml @@ -4,6 +4,7 @@ version = "0.2.0" authors = ["Parity Technologies "] description = "A `HashDB` which can manage a short-term journal potentially containing many forks of mutually exclusive actions" license = "GPL3" +edition = "2018" [dependencies] parity-bytes = "0.1" diff --git a/util/journaldb/src/archivedb.rs b/util/journaldb/src/archivedb.rs index 57e0b5cf6d0..ce965c657a0 100644 --- a/util/journaldb/src/archivedb.rs +++ b/util/journaldb/src/archivedb.rs @@ -29,7 +29,7 @@ use keccak_hasher::KeccakHasher; use kvdb::{KeyValueDB, DBTransaction, DBValue}; use rlp::{encode, decode}; use super::{DB_PREFIX_LEN, LATEST_ERA_KEY, error_key_already_exists, error_negatively_reference_hash}; -use traits::JournalDB; +use crate::{JournalDB, new_memory_db}; /// Implementation of the `HashDB` trait for a disk-backed database with a memory overlay /// and latent-removal semantics. @@ -52,7 +52,7 @@ impl ArchiveDB { .expect("Low-level database error.") .map(|val| decode::(&val).expect("decoding db value failed")); ArchiveDB { - overlay: ::new_memory_db(), + overlay: new_memory_db(), backing, latest_era, column, @@ -201,11 +201,11 @@ impl JournalDB for ArchiveDB { #[cfg(test)] mod tests { - - use keccak::keccak; + use keccak_hash::keccak; use hash_db::{HashDB, EMPTY_PREFIX}; use super::*; - use {kvdb_memorydb, JournalDB, inject_batch, commit_batch}; + use kvdb_memorydb; + use crate::{JournalDB, inject_batch, commit_batch}; #[test] fn insert_same_in_fork() { diff --git a/util/journaldb/src/as_hash_db_impls.rs b/util/journaldb/src/as_hash_db_impls.rs index d46e7deb108..f419fc50e7f 100644 --- a/util/journaldb/src/as_hash_db_impls.rs +++ b/util/journaldb/src/as_hash_db_impls.rs @@ -17,11 +17,11 @@ //! 
Impls of the `AsHashDB` upcast trait for all different variants of DB use hash_db::{HashDB, AsHashDB}; use keccak_hasher::KeccakHasher; -use archivedb::ArchiveDB; -use earlymergedb::EarlyMergeDB; -use overlayrecentdb::OverlayRecentDB; -use refcounteddb::RefCountedDB; -use overlaydb::OverlayDB; +use crate::archivedb::ArchiveDB; +use crate::earlymergedb::EarlyMergeDB; +use crate::overlayrecentdb::OverlayRecentDB; +use crate::refcounteddb::RefCountedDB; +use crate::overlaydb::OverlayDB; use kvdb::DBValue; impl AsHashDB for ArchiveDB { diff --git a/util/journaldb/src/earlymergedb.rs b/util/journaldb/src/earlymergedb.rs index 78abe46759f..2b8e4c602e5 100644 --- a/util/journaldb/src/earlymergedb.rs +++ b/util/journaldb/src/earlymergedb.rs @@ -21,6 +21,7 @@ use std::collections::hash_map::Entry; use std::io; use std::sync::Arc; +use log::{trace, warn}; use bytes::Bytes; use ethereum_types::H256; use hash_db::{HashDB, Prefix}; @@ -30,8 +31,8 @@ use kvdb::{KeyValueDB, DBTransaction, DBValue}; use parking_lot::RwLock; use rlp::{encode, decode}; use super::{DB_PREFIX_LEN, LATEST_ERA_KEY, error_negatively_reference_hash, error_key_already_exists}; -use super::traits::JournalDB; -use util::{DatabaseKey, DatabaseValueView, DatabaseValueRef}; +use crate::{JournalDB, new_memory_db}; +use crate::util::{DatabaseKey, DatabaseValueView, DatabaseValueRef}; #[derive(Debug, Clone, PartialEq, Eq, MallocSizeOf)] struct RefInfo { @@ -115,7 +116,7 @@ impl EarlyMergeDB { let (latest_era, refs) = EarlyMergeDB::read_refs(&*backing, col); let refs = Some(Arc::new(RwLock::new(refs))); EarlyMergeDB { - overlay: ::new_memory_db(), + overlay: new_memory_db(), backing: backing, refs: refs, latest_era: latest_era, @@ -521,11 +522,11 @@ impl JournalDB for EarlyMergeDB { #[cfg(test)] mod tests { - - use keccak::keccak; + use keccak_hash::keccak; use hash_db::{HashDB, EMPTY_PREFIX}; use super::*; - use {kvdb_memorydb, inject_batch, commit_batch}; + use kvdb_memorydb; + use crate::{inject_batch, commit_batch}; #[test] fn insert_same_in_fork() { diff --git a/util/journaldb/src/lib.rs b/util/journaldb/src/lib.rs index b1eb59841d9..58e7ab32c3f 100644 --- a/util/journaldb/src/lib.rs +++ b/util/journaldb/src/lib.rs @@ -16,28 +16,9 @@ //! `JournalDB` interface and implementation. 
-extern crate parity_util_mem; -extern crate parity_util_mem as mem; extern crate parity_util_mem as malloc_size_of; -#[macro_use] -extern crate log; -extern crate ethereum_types; extern crate parity_bytes as bytes; -extern crate hash_db; -extern crate keccak_hasher; -extern crate kvdb; -extern crate memory_db; -extern crate parking_lot; -extern crate fastmap; -extern crate rlp; - -#[cfg(test)] -extern crate env_logger; -#[cfg(test)] -extern crate keccak_hash as keccak; -#[cfg(test)] -extern crate kvdb_memorydb; use std::{fmt, str, io}; use std::sync::Arc; diff --git a/util/journaldb/src/overlaydb.rs b/util/journaldb/src/overlaydb.rs index 548d1dc9903..01bcd2e3fb8 100644 --- a/util/journaldb/src/overlaydb.rs +++ b/util/journaldb/src/overlaydb.rs @@ -21,12 +21,13 @@ use std::collections::hash_map::Entry; use std::io; use std::sync::Arc; +use log::trace; use ethereum_types::H256; use rlp::{Rlp, RlpStream, Encodable, DecoderError, Decodable, encode, decode}; use hash_db::{HashDB, Prefix, EMPTY_PREFIX}; use keccak_hasher::KeccakHasher; use kvdb::{KeyValueDB, DBTransaction, DBValue}; -use super::{error_negatively_reference_hash}; +use crate::{error_negatively_reference_hash, new_memory_db}; /// Implementation of the `HashDB` trait for a disk-backed database with a memory overlay. /// @@ -78,8 +79,12 @@ impl Decodable for Payload { impl OverlayDB { /// Create a new instance of OverlayDB given a `backing` database. - pub fn new(backing: Arc, col: Option) -> OverlayDB { - OverlayDB{ overlay: ::new_memory_db(), backing: backing, column: col } + pub fn new(backing: Arc, column: Option) -> OverlayDB { + OverlayDB { + overlay: new_memory_db(), + backing, + column, + } } /// Create a new instance of OverlayDB with an anonymous temporary database. diff --git a/util/journaldb/src/overlayrecentdb.rs b/util/journaldb/src/overlayrecentdb.rs index 4742afe68b5..4a689fdd063 100644 --- a/util/journaldb/src/overlayrecentdb.rs +++ b/util/journaldb/src/overlayrecentdb.rs @@ -21,6 +21,7 @@ use std::collections::hash_map::Entry; use std::io; use std::sync::Arc; +use log::trace; use bytes::Bytes; use ethereum_types::H256; use hash_db::{HashDB, Prefix, EMPTY_PREFIX}; @@ -30,8 +31,11 @@ use kvdb::{KeyValueDB, DBTransaction, DBValue}; use parking_lot::RwLock; use fastmap::H256FastMap; use rlp::{Rlp, RlpStream, encode, decode, DecoderError, Decodable, Encodable}; -use super::{DB_PREFIX_LEN, LATEST_ERA_KEY, JournalDB, error_negatively_reference_hash}; -use util::DatabaseKey; +use crate::{ + DB_PREFIX_LEN, LATEST_ERA_KEY, JournalDB, error_negatively_reference_hash, + new_memory_db +}; +use crate::util::DatabaseKey; /// Implementation of the `JournalDB` trait for a disk-backed database with a memory overlay /// and, possibly, latent-removal semantics. 
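With `commit_batch` and `inject_batch` removed from the trait, callers now drive the journal/canonicalize cycle themselves, presumably via the free `commit_batch`/`inject_batch` helpers added to lib.rs (their bodies are not shown in this section, only the test-side calls). A minimal sketch of that flow, written as if it lived inside the crate and mirroring the removed default methods; the helper names `commit_one_era`/`inject_all` and the `u32` op-count return type are assumptions, not the crate's actual API:

    use std::io;
    use ethereum_types::H256;
    use crate::JournalDB;

    // Sketch only: journal one era and, optionally, mark an older era canonical,
    // exactly as the removed `commit_batch` default method did.
    fn commit_one_era(db: &mut dyn JournalDB, era: u64, id: &H256, end: Option<(u64, H256)>) -> io::Result<u32> {
        let mut batch = db.backing().transaction();
        let mut ops = db.journal_under(&mut batch, era, id)?;
        if let Some((end_era, canon_id)) = end {
            // Prune competing forks once `canon_id` is final for `end_era`.
            ops += db.mark_canonical(&mut batch, end_era, &canon_id)?;
        }
        db.backing().write(batch)?;
        // Clear internal structures now that the changes have reached the backing store.
        db.flush();
        Ok(ops)
    }

    // Sketch only: commit queued inserts/deletes without journalling, as `inject_batch` did.
    fn inject_all(db: &mut dyn JournalDB) -> io::Result<u32> {
        let mut batch = db.backing().transaction();
        let ops = db.inject(&mut batch)?;
        db.backing().write(batch)?;
        Ok(ops)
    }
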
@@ -150,7 +154,7 @@ impl OverlayRecentDB { pub fn new(backing: Arc, col: Option) -> OverlayRecentDB { let journal_overlay = Arc::new(RwLock::new(OverlayRecentDB::read_overlay(&*backing, col))); OverlayRecentDB { - transaction_overlay: ::new_memory_db(), + transaction_overlay: new_memory_db(), backing: backing, journal_overlay: journal_overlay, column: col, @@ -176,7 +180,7 @@ impl OverlayRecentDB { fn read_overlay(db: &dyn KeyValueDB, col: Option) -> JournalOverlay { let mut journal = HashMap::new(); - let mut overlay = ::new_memory_db(); + let mut overlay = new_memory_db(); let mut count = 0; let mut latest_era = None; let mut earliest_era = None; @@ -488,11 +492,11 @@ impl HashDB for OverlayRecentDB { #[cfg(test)] mod tests { - - use keccak::keccak; + use keccak_hash::keccak; use super::*; use hash_db::{HashDB, EMPTY_PREFIX}; - use {kvdb_memorydb, JournalDB, inject_batch, commit_batch}; + use kvdb_memorydb; + use crate::{JournalDB, inject_batch, commit_batch}; fn new_db() -> OverlayRecentDB { let backing = Arc::new(kvdb_memorydb::create(0)); diff --git a/util/journaldb/src/refcounteddb.rs b/util/journaldb/src/refcounteddb.rs index ce8d2651b7c..17076df0074 100644 --- a/util/journaldb/src/refcounteddb.rs +++ b/util/journaldb/src/refcounteddb.rs @@ -20,17 +20,18 @@ use std::collections::HashMap; use std::io; use std::sync::Arc; +use log::trace; use bytes::Bytes; use ethereum_types::H256; use hash_db::{HashDB, Prefix, EMPTY_PREFIX}; use parity_util_mem::{MallocSizeOf, allocators::new_malloc_size_ops}; use keccak_hasher::KeccakHasher; use kvdb::{KeyValueDB, DBTransaction, DBValue}; -use overlaydb::OverlayDB; +use crate::overlaydb::OverlayDB; use rlp::{encode, decode}; use super::{DB_PREFIX_LEN, LATEST_ERA_KEY}; -use super::traits::JournalDB; -use util::{DatabaseKey, DatabaseValueView, DatabaseValueRef}; +use crate::JournalDB; +use crate::util::{DatabaseKey, DatabaseValueView, DatabaseValueRef}; /// Implementation of the `HashDB` trait for a disk-backed database with a memory overlay /// and latent-removal semantics. @@ -217,11 +218,11 @@ impl JournalDB for RefCountedDB { #[cfg(test)] mod tests { - - use keccak::keccak; + use keccak_hash::keccak; use hash_db::{HashDB, EMPTY_PREFIX}; use super::*; - use {JournalDB, kvdb_memorydb, inject_batch, commit_batch}; + use kvdb_memorydb; + use crate::{JournalDB, inject_batch, commit_batch}; fn new_db() -> RefCountedDB { let backing = Arc::new(kvdb_memorydb::create(0)); From 9c7b5f0d3bda7fd3bb178e6ffdea1563e4ce68aa Mon Sep 17 00:00:00 2001 From: debris Date: Tue, 30 Jul 2019 20:18:12 +0200 Subject: [PATCH 4/7] journaldb trait moved to the lib.rs file --- util/journaldb/src/lib.rs | 76 ++++++++++++++++++++++++++--- util/journaldb/src/overlaydb.rs | 7 ++- util/journaldb/src/traits.rs | 86 --------------------------------- 3 files changed, 71 insertions(+), 98 deletions(-) delete mode 100644 util/journaldb/src/traits.rs diff --git a/util/journaldb/src/lib.rs b/util/journaldb/src/lib.rs index 58e7ab32c3f..515335ce54d 100644 --- a/util/journaldb/src/lib.rs +++ b/util/journaldb/src/lib.rs @@ -20,24 +20,84 @@ extern crate parity_util_mem as malloc_size_of; extern crate parity_bytes as bytes; -use std::{fmt, str, io}; -use std::sync::Arc; -#[cfg(test)] +use std::{ + fmt, str, io, + sync::Arc, + collections::HashMap, +}; + +use bytes::Bytes; use ethereum_types::H256; +use hash_db::HashDB; +use keccak_hasher::KeccakHasher; +use kvdb::{self, DBTransaction, DBValue}; -/// Export the journaldb module. 
-mod traits; mod archivedb; mod earlymergedb; mod overlayrecentdb; mod refcounteddb; mod util; mod as_hash_db_impls; +mod overlaydb; + +/// A `HashDB` which can manage a short-term journal potentially containing many forks of mutually +/// exclusive actions. +pub trait JournalDB: HashDB { + /// Return a copy of ourself, in a box. + fn boxed_clone(&self) -> Box; + + /// Returns heap memory size used + fn mem_used(&self) -> usize; + + /// Returns the size of journalled state in memory. + /// This function has a considerable speed requirement -- + /// it must be fast enough to call several times per block imported. + fn journal_size(&self) -> usize { 0 } + + /// Check if this database has any commits + fn is_empty(&self) -> bool; + + /// Get the earliest era in the DB. None if there isn't yet any data in there. + fn earliest_era(&self) -> Option { None } + + /// Get the latest era in the DB. None if there isn't yet any data in there. + fn latest_era(&self) -> Option; + + /// Journal recent database operations as being associated with a given era and id. + // TODO: give the overlay to this function so journaldbs don't manage the overlays themselves. + fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> io::Result; -pub mod overlaydb; + /// Mark a given block as canonical, indicating that competing blocks' states may be pruned out. + fn mark_canonical(&mut self, batch: &mut DBTransaction, era: u64, id: &H256) -> io::Result; -/// Export the `JournalDB` trait. -pub use self::traits::JournalDB; + /// Commit all queued insert and delete operations without affecting any journalling -- this requires that all insertions + /// and deletions are indeed canonical and will likely lead to an invalid database if that assumption is violated. + /// + /// Any keys or values inserted or deleted must be completely independent of those affected + /// by any previous `commit` operations. Essentially, this means that `inject` can be used + /// either to restore a state to a fresh database, or to insert data which may only be journalled + /// from this point onwards. + fn inject(&mut self, batch: &mut DBTransaction) -> io::Result; + + /// State data query + fn state(&self, _id: &H256) -> Option; + + /// Whether this database is pruned. + fn is_prunable(&self) -> bool { true } + + /// Get backing database. + fn backing(&self) -> &Arc; + + /// Clear internal strucutres. This should called after changes have been written + /// to the backing strage + fn flush(&self) {} + + /// Consolidate all the insertions and deletions in the given memory overlay. + fn consolidate(&mut self, overlay: MemoryDB); + + /// Primarily use for tests, highly inefficient. + fn keys(&self) -> HashMap; +} /// Alias to ethereum MemoryDB type MemoryDB = memory_db::MemoryDB< diff --git a/util/journaldb/src/overlaydb.rs b/util/journaldb/src/overlaydb.rs index 01bcd2e3fb8..d9dd999aad2 100644 --- a/util/journaldb/src/overlaydb.rs +++ b/util/journaldb/src/overlaydb.rs @@ -135,10 +135,9 @@ impl OverlayDB { /// Revert all operations on this object (i.e. `insert()`s and `remove()`s) since the /// last `commit()`. - pub fn revert(&mut self) { self.overlay.clear(); } - - /// Get the number of references that would be committed. - pub fn commit_refs(&self, key: &H256) -> i32 { self.overlay.raw(key, EMPTY_PREFIX).map_or(0, |(_, refs)| refs) } + pub fn revert(&mut self) { + self.overlay.clear(); + } /// Get the refs and value of the given key. 
fn payload(&self, key: &H256) -> Option { diff --git a/util/journaldb/src/traits.rs b/util/journaldb/src/traits.rs deleted file mode 100644 index 9a426cc0ba5..00000000000 --- a/util/journaldb/src/traits.rs +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2015-2019 Parity Technologies (UK) Ltd. -// This file is part of Parity Ethereum. - -// Parity Ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Ethereum. If not, see . - -//! Disk-backed `HashDB` implementation. - -use std::io; -use std::sync::Arc; - -use bytes::Bytes; -use ethereum_types::H256; -use hash_db::HashDB; -use keccak_hasher::KeccakHasher; -use kvdb::{self, DBTransaction, DBValue}; -use std::collections::HashMap; - -/// A `HashDB` which can manage a short-term journal potentially containing many forks of mutually -/// exclusive actions. -pub trait JournalDB: HashDB { - /// Return a copy of ourself, in a box. - fn boxed_clone(&self) -> Box; - - /// Returns heap memory size used - fn mem_used(&self) -> usize; - - /// Returns the size of journalled state in memory. - /// This function has a considerable speed requirement -- - /// it must be fast enough to call several times per block imported. - fn journal_size(&self) -> usize { 0 } - - /// Check if this database has any commits - fn is_empty(&self) -> bool; - - /// Get the earliest era in the DB. None if there isn't yet any data in there. - fn earliest_era(&self) -> Option { None } - - /// Get the latest era in the DB. None if there isn't yet any data in there. - fn latest_era(&self) -> Option; - - /// Journal recent database operations as being associated with a given era and id. - // TODO: give the overlay to this function so journaldbs don't manage the overlays themselves. - fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> io::Result; - - /// Mark a given block as canonical, indicating that competing blocks' states may be pruned out. - fn mark_canonical(&mut self, batch: &mut DBTransaction, era: u64, id: &H256) -> io::Result; - - /// Commit all queued insert and delete operations without affecting any journalling -- this requires that all insertions - /// and deletions are indeed canonical and will likely lead to an invalid database if that assumption is violated. - /// - /// Any keys or values inserted or deleted must be completely independent of those affected - /// by any previous `commit` operations. Essentially, this means that `inject` can be used - /// either to restore a state to a fresh database, or to insert data which may only be journalled - /// from this point onwards. - fn inject(&mut self, batch: &mut DBTransaction) -> io::Result; - - /// State data query - fn state(&self, _id: &H256) -> Option; - - /// Whether this database is pruned. - fn is_prunable(&self) -> bool { true } - - /// Get backing database. - fn backing(&self) -> &Arc; - - /// Clear internal strucutres. 
This should called after changes have been written - /// to the backing strage - fn flush(&self) {} - - /// Consolidate all the insertions and deletions in the given memory overlay. - fn consolidate(&mut self, overlay: super::MemoryDB); - - /// Primarily use for tests, highly inefficient. - fn keys(&self) -> HashMap; -} From b635b0429d4bda2774b582ce3fa9511264318d1a Mon Sep 17 00:00:00 2001 From: debris Date: Tue, 30 Jul 2019 20:29:32 +0200 Subject: [PATCH 5/7] making journaldb more idiomatic --- util/journaldb/src/archivedb.rs | 18 ++- util/journaldb/src/as_hash_db_impls.rs | 14 +- util/journaldb/src/earlymergedb.rs | 24 +-- util/journaldb/src/overlaydb.rs | 211 ++++++++++++------------- util/journaldb/src/overlayrecentdb.rs | 22 +-- util/journaldb/src/refcounteddb.rs | 24 +-- 6 files changed, 162 insertions(+), 151 deletions(-) diff --git a/util/journaldb/src/archivedb.rs b/util/journaldb/src/archivedb.rs index ce965c657a0..56730dc9052 100644 --- a/util/journaldb/src/archivedb.rs +++ b/util/journaldb/src/archivedb.rs @@ -16,20 +16,24 @@ //! Disk-backed `HashDB` implementation. -use std::collections::HashMap; -use std::collections::hash_map::Entry; -use std::io; -use std::sync::Arc; +use std::{ + collections::{HashMap, hash_map::Entry}, + io, + sync::Arc, +}; use bytes::Bytes; use ethereum_types::H256; -use parity_util_mem::MallocSizeOfExt; use hash_db::{HashDB, Prefix}; use keccak_hasher::KeccakHasher; use kvdb::{KeyValueDB, DBTransaction, DBValue}; +use parity_util_mem::MallocSizeOfExt; use rlp::{encode, decode}; -use super::{DB_PREFIX_LEN, LATEST_ERA_KEY, error_key_already_exists, error_negatively_reference_hash}; -use crate::{JournalDB, new_memory_db}; + +use crate::{ + DB_PREFIX_LEN, LATEST_ERA_KEY, error_key_already_exists, error_negatively_reference_hash, + JournalDB, new_memory_db +}; /// Implementation of the `HashDB` trait for a disk-backed database with a memory overlay /// and latent-removal semantics. diff --git a/util/journaldb/src/as_hash_db_impls.rs b/util/journaldb/src/as_hash_db_impls.rs index f419fc50e7f..eae3c1f8a0a 100644 --- a/util/journaldb/src/as_hash_db_impls.rs +++ b/util/journaldb/src/as_hash_db_impls.rs @@ -17,13 +17,17 @@ //! Impls of the `AsHashDB` upcast trait for all different variants of DB use hash_db::{HashDB, AsHashDB}; use keccak_hasher::KeccakHasher; -use crate::archivedb::ArchiveDB; -use crate::earlymergedb::EarlyMergeDB; -use crate::overlayrecentdb::OverlayRecentDB; -use crate::refcounteddb::RefCountedDB; -use crate::overlaydb::OverlayDB; + use kvdb::DBValue; +use crate::{ + archivedb::ArchiveDB, + earlymergedb::EarlyMergeDB, + overlayrecentdb::OverlayRecentDB, + refcounteddb::RefCountedDB, + overlaydb::OverlayDB, +}; + impl AsHashDB for ArchiveDB { fn as_hash_db(&self) -> &dyn HashDB { self } fn as_hash_db_mut(&mut self) -> &mut dyn HashDB { self } diff --git a/util/journaldb/src/earlymergedb.rs b/util/journaldb/src/earlymergedb.rs index 2b8e4c602e5..deb22cbbeb6 100644 --- a/util/journaldb/src/earlymergedb.rs +++ b/util/journaldb/src/earlymergedb.rs @@ -16,23 +16,27 @@ //! Disk-backed `HashDB` implementation. 
-use std::collections::HashMap; -use std::collections::hash_map::Entry; -use std::io; -use std::sync::Arc; +use std::{ + collections::{HashMap, hash_map::Entry}, + io, + sync::Arc, +}; -use log::{trace, warn}; -use bytes::Bytes; use ethereum_types::H256; use hash_db::{HashDB, Prefix}; -use parity_util_mem::{MallocSizeOf, allocators::new_malloc_size_ops}; use keccak_hasher::KeccakHasher; use kvdb::{KeyValueDB, DBTransaction, DBValue}; +use log::{trace, warn}; +use parity_bytes::Bytes; +use parity_util_mem::{MallocSizeOf, allocators::new_malloc_size_ops}; use parking_lot::RwLock; use rlp::{encode, decode}; -use super::{DB_PREFIX_LEN, LATEST_ERA_KEY, error_negatively_reference_hash, error_key_already_exists}; -use crate::{JournalDB, new_memory_db}; -use crate::util::{DatabaseKey, DatabaseValueView, DatabaseValueRef}; + +use crate::{ + DB_PREFIX_LEN, LATEST_ERA_KEY, error_negatively_reference_hash, error_key_already_exists, + JournalDB, new_memory_db, + util::{DatabaseKey, DatabaseValueView, DatabaseValueRef}, +}; #[derive(Debug, Clone, PartialEq, Eq, MallocSizeOf)] struct RefInfo { diff --git a/util/journaldb/src/overlaydb.rs b/util/journaldb/src/overlaydb.rs index d9dd999aad2..44870acf161 100644 --- a/util/journaldb/src/overlaydb.rs +++ b/util/journaldb/src/overlaydb.rs @@ -16,17 +16,19 @@ //! Disk-backed `HashDB` implementation. -use std::collections::HashMap; -use std::collections::hash_map::Entry; -use std::io; -use std::sync::Arc; +use std::{ + collections::{HashMap, hash_map::Entry}, + io, + sync::Arc, +}; -use log::trace; use ethereum_types::H256; -use rlp::{Rlp, RlpStream, Encodable, DecoderError, Decodable, encode, decode}; -use hash_db::{HashDB, Prefix, EMPTY_PREFIX}; +use hash_db::{HashDB, Prefix}; use keccak_hasher::KeccakHasher; use kvdb::{KeyValueDB, DBTransaction, DBValue}; +use log::trace; +use rlp::{Rlp, RlpStream, Encodable, DecoderError, Decodable, encode, decode}; + use crate::{error_negatively_reference_hash, new_memory_db}; /// Implementation of the `HashDB` trait for a disk-backed database with a memory overlay. @@ -133,12 +135,6 @@ impl OverlayDB { Ok(ret) } - /// Revert all operations on this object (i.e. `insert()`s and `remove()`s) since the - /// last `commit()`. - pub fn revert(&mut self) { - self.overlay.clear(); - } - /// Get the refs and value of the given key. fn payload(&self, key: &H256) -> Option { self.backing.get(self.column, key.as_bytes()) @@ -233,106 +229,103 @@ impl HashDB for OverlayDB { fn remove(&mut self, key: &H256, prefix: Prefix) { self.overlay.remove(key, prefix); } } -#[test] -fn overlaydb_revert() { - let mut m = OverlayDB::new_temp(); - let foo = m.insert(EMPTY_PREFIX, b"foo"); // insert foo. - let mut batch = m.backing.transaction(); - m.commit_to_batch(&mut batch).unwrap(); // commit - new operations begin here... - m.backing.write(batch).unwrap(); - let bar = m.insert(EMPTY_PREFIX, b"bar"); // insert bar. - m.remove(&foo, EMPTY_PREFIX); // remove foo. - assert!(!m.contains(&foo, EMPTY_PREFIX)); // foo is gone. - assert!(m.contains(&bar, EMPTY_PREFIX)); // bar is here. - m.revert(); // revert the last two operations. - assert!(m.contains(&foo, EMPTY_PREFIX)); // foo is here. - assert!(!m.contains(&bar, EMPTY_PREFIX)); // bar is gone. 
-} +#[cfg(test)] +mod tests { + use hash_db::EMPTY_PREFIX; + use super::*; -#[test] -fn overlaydb_overlay_insert_and_remove() { - let mut trie = OverlayDB::new_temp(); - let h = trie.insert(EMPTY_PREFIX, b"hello world"); - assert_eq!(trie.get(&h, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"hello world")); - trie.remove(&h, EMPTY_PREFIX); - assert_eq!(trie.get(&h, EMPTY_PREFIX), None); -} + #[test] + fn overlaydb_revert() { + let mut m = OverlayDB::new_temp(); + let foo = m.insert(EMPTY_PREFIX, b"foo"); // insert foo. + let mut batch = m.backing.transaction(); + m.commit_to_batch(&mut batch).unwrap(); // commit - new operations begin here... + m.backing.write(batch).unwrap(); + let bar = m.insert(EMPTY_PREFIX, b"bar"); // insert bar. + m.remove(&foo, EMPTY_PREFIX); // remove foo. + assert!(!m.contains(&foo, EMPTY_PREFIX)); // foo is gone. + assert!(m.contains(&bar, EMPTY_PREFIX)); // bar is here. + } -#[test] -fn overlaydb_backing_insert_revert() { - let mut trie = OverlayDB::new_temp(); - let h = trie.insert(EMPTY_PREFIX, b"hello world"); - assert_eq!(trie.get(&h, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"hello world")); - trie.commit().unwrap(); - assert_eq!(trie.get(&h, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"hello world")); - trie.revert(); - assert_eq!(trie.get(&h, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"hello world")); -} + #[test] + fn overlaydb_overlay_insert_and_remove() { + let mut trie = OverlayDB::new_temp(); + let h = trie.insert(EMPTY_PREFIX, b"hello world"); + assert_eq!(trie.get(&h, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"hello world")); + trie.remove(&h, EMPTY_PREFIX); + assert_eq!(trie.get(&h, EMPTY_PREFIX), None); + } -#[test] -fn overlaydb_backing_remove() { - let mut trie = OverlayDB::new_temp(); - let h = trie.insert(EMPTY_PREFIX, b"hello world"); - trie.commit().unwrap(); - trie.remove(&h, EMPTY_PREFIX); - assert_eq!(trie.get(&h, EMPTY_PREFIX), None); - trie.commit().unwrap(); - assert_eq!(trie.get(&h, EMPTY_PREFIX), None); - trie.revert(); - assert_eq!(trie.get(&h, EMPTY_PREFIX), None); -} + #[test] + fn overlaydb_backing_insert_revert() { + let mut trie = OverlayDB::new_temp(); + let h = trie.insert(EMPTY_PREFIX, b"hello world"); + assert_eq!(trie.get(&h, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"hello world")); + trie.commit().unwrap(); + assert_eq!(trie.get(&h, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"hello world")); + } -#[test] -fn overlaydb_backing_remove_revert() { - let mut trie = OverlayDB::new_temp(); - let h = trie.insert(EMPTY_PREFIX, b"hello world"); - trie.commit().unwrap(); - trie.remove(&h, EMPTY_PREFIX); - assert_eq!(trie.get(&h, EMPTY_PREFIX), None); - trie.revert(); - assert_eq!(trie.get(&h, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"hello world")); -} + #[test] + fn overlaydb_backing_remove() { + let mut trie = OverlayDB::new_temp(); + let h = trie.insert(EMPTY_PREFIX, b"hello world"); + trie.commit().unwrap(); + trie.remove(&h, EMPTY_PREFIX); + assert_eq!(trie.get(&h, EMPTY_PREFIX), None); + trie.commit().unwrap(); + assert_eq!(trie.get(&h, EMPTY_PREFIX), None); + } -#[test] -fn overlaydb_negative() { - let mut trie = OverlayDB::new_temp(); - let h = trie.insert(EMPTY_PREFIX, b"hello world"); - trie.commit().unwrap(); - trie.remove(&h, EMPTY_PREFIX); - trie.remove(&h, EMPTY_PREFIX); //bad - sends us into negative refs. 
- assert_eq!(trie.get(&h, EMPTY_PREFIX), None); - assert!(trie.commit().is_err()); -} + #[test] + fn overlaydb_backing_remove_revert() { + let mut trie = OverlayDB::new_temp(); + let h = trie.insert(EMPTY_PREFIX, b"hello world"); + trie.commit().unwrap(); + trie.remove(&h, EMPTY_PREFIX); + assert_eq!(trie.get(&h, EMPTY_PREFIX), None); + } -#[test] -fn overlaydb_complex() { - let mut trie = OverlayDB::new_temp(); - let hfoo = trie.insert(EMPTY_PREFIX, b"foo"); - assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"foo")); - let hbar = trie.insert(EMPTY_PREFIX, b"bar"); - assert_eq!(trie.get(&hbar, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"bar")); - trie.commit().unwrap(); - assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"foo")); - assert_eq!(trie.get(&hbar, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"bar")); - trie.insert(EMPTY_PREFIX, b"foo"); // two refs - assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"foo")); - trie.commit().unwrap(); - assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"foo")); - assert_eq!(trie.get(&hbar, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"bar")); - trie.remove(&hbar, EMPTY_PREFIX); // zero refs - delete - assert_eq!(trie.get(&hbar, EMPTY_PREFIX), None); - trie.remove(&hfoo, EMPTY_PREFIX); // one ref - keep - assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"foo")); - trie.commit().unwrap(); - assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"foo")); - trie.remove(&hfoo, EMPTY_PREFIX); // zero ref - would delete, but... - assert_eq!(trie.get(&hfoo, EMPTY_PREFIX), None); - trie.insert(EMPTY_PREFIX, b"foo"); // one ref - keep after all. - assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"foo")); - trie.commit().unwrap(); - assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"foo")); - trie.remove(&hfoo, EMPTY_PREFIX); // zero ref - delete - assert_eq!(trie.get(&hfoo, EMPTY_PREFIX), None); - trie.commit().unwrap(); // - assert_eq!(trie.get(&hfoo, EMPTY_PREFIX), None); + #[test] + fn overlaydb_negative() { + let mut trie = OverlayDB::new_temp(); + let h = trie.insert(EMPTY_PREFIX, b"hello world"); + trie.commit().unwrap(); + trie.remove(&h, EMPTY_PREFIX); + trie.remove(&h, EMPTY_PREFIX); //bad - sends us into negative refs. 
+ assert_eq!(trie.get(&h, EMPTY_PREFIX), None); + assert!(trie.commit().is_err()); + } + + #[test] + fn overlaydb_complex() { + let mut trie = OverlayDB::new_temp(); + let hfoo = trie.insert(EMPTY_PREFIX, b"foo"); + assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"foo")); + let hbar = trie.insert(EMPTY_PREFIX, b"bar"); + assert_eq!(trie.get(&hbar, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"bar")); + trie.commit().unwrap(); + assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"foo")); + assert_eq!(trie.get(&hbar, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"bar")); + trie.insert(EMPTY_PREFIX, b"foo"); // two refs + assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"foo")); + trie.commit().unwrap(); + assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"foo")); + assert_eq!(trie.get(&hbar, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"bar")); + trie.remove(&hbar, EMPTY_PREFIX); // zero refs - delete + assert_eq!(trie.get(&hbar, EMPTY_PREFIX), None); + trie.remove(&hfoo, EMPTY_PREFIX); // one ref - keep + assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"foo")); + trie.commit().unwrap(); + assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"foo")); + trie.remove(&hfoo, EMPTY_PREFIX); // zero ref - would delete, but... + assert_eq!(trie.get(&hfoo, EMPTY_PREFIX), None); + trie.insert(EMPTY_PREFIX, b"foo"); // one ref - keep after all. + assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"foo")); + trie.commit().unwrap(); + assert_eq!(trie.get(&hfoo, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"foo")); + trie.remove(&hfoo, EMPTY_PREFIX); // zero ref - delete + assert_eq!(trie.get(&hfoo, EMPTY_PREFIX), None); + trie.commit().unwrap(); // + assert_eq!(trie.get(&hfoo, EMPTY_PREFIX), None); + } } diff --git a/util/journaldb/src/overlayrecentdb.rs b/util/journaldb/src/overlayrecentdb.rs index 4a689fdd063..6b873e8ffcc 100644 --- a/util/journaldb/src/overlayrecentdb.rs +++ b/util/journaldb/src/overlayrecentdb.rs @@ -16,26 +16,28 @@ //! `JournalDB` over in-memory overlay -use std::collections::HashMap; -use std::collections::hash_map::Entry; -use std::io; -use std::sync::Arc; +use std::{ + collections::{HashMap, hash_map::Entry}, + io, + sync::Arc, +}; -use log::trace; -use bytes::Bytes; use ethereum_types::H256; +use fastmap::H256FastMap; use hash_db::{HashDB, Prefix, EMPTY_PREFIX}; -use parity_util_mem::{MallocSizeOf, allocators::new_malloc_size_ops}; use keccak_hasher::KeccakHasher; use kvdb::{KeyValueDB, DBTransaction, DBValue}; +use log::trace; +use parity_bytes::Bytes; +use parity_util_mem::{MallocSizeOf, allocators::new_malloc_size_ops}; use parking_lot::RwLock; -use fastmap::H256FastMap; use rlp::{Rlp, RlpStream, encode, decode, DecoderError, Decodable, Encodable}; + use crate::{ DB_PREFIX_LEN, LATEST_ERA_KEY, JournalDB, error_negatively_reference_hash, - new_memory_db + new_memory_db, + util::DatabaseKey }; -use crate::util::DatabaseKey; /// Implementation of the `JournalDB` trait for a disk-backed database with a memory overlay /// and, possibly, latent-removal semantics. diff --git a/util/journaldb/src/refcounteddb.rs b/util/journaldb/src/refcounteddb.rs index 17076df0074..8e874630564 100644 --- a/util/journaldb/src/refcounteddb.rs +++ b/util/journaldb/src/refcounteddb.rs @@ -16,22 +16,26 @@ //! Disk-backed, ref-counted `JournalDB` implementation. 
-use std::collections::HashMap; -use std::io; -use std::sync::Arc; +use std::{ + io, + sync::Arc, + collections::HashMap, +}; -use log::trace; -use bytes::Bytes; use ethereum_types::H256; use hash_db::{HashDB, Prefix, EMPTY_PREFIX}; -use parity_util_mem::{MallocSizeOf, allocators::new_malloc_size_ops}; use keccak_hasher::KeccakHasher; use kvdb::{KeyValueDB, DBTransaction, DBValue}; -use crate::overlaydb::OverlayDB; +use log::trace; +use parity_bytes::Bytes; +use parity_util_mem::{MallocSizeOf, allocators::new_malloc_size_ops}; use rlp::{encode, decode}; -use super::{DB_PREFIX_LEN, LATEST_ERA_KEY}; -use crate::JournalDB; -use crate::util::{DatabaseKey, DatabaseValueView, DatabaseValueRef}; + +use crate::{ + overlaydb::OverlayDB, + JournalDB, DB_PREFIX_LEN, LATEST_ERA_KEY, + util::{DatabaseKey, DatabaseValueView, DatabaseValueRef}, +}; /// Implementation of the `HashDB` trait for a disk-backed database with a memory overlay /// and latent-removal semantics. From 228096e10cb8cfa913bb17065fa3d2029259628d Mon Sep 17 00:00:00 2001 From: debris Date: Tue, 30 Jul 2019 20:39:08 +0200 Subject: [PATCH 6/7] fix parity_bytes reexport --- util/journaldb/src/archivedb.rs | 2 +- util/journaldb/src/lib.rs | 4 +--- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/util/journaldb/src/archivedb.rs b/util/journaldb/src/archivedb.rs index 56730dc9052..9db14779fd9 100644 --- a/util/journaldb/src/archivedb.rs +++ b/util/journaldb/src/archivedb.rs @@ -22,11 +22,11 @@ use std::{ sync::Arc, }; -use bytes::Bytes; use ethereum_types::H256; use hash_db::{HashDB, Prefix}; use keccak_hasher::KeccakHasher; use kvdb::{KeyValueDB, DBTransaction, DBValue}; +use parity_bytes::Bytes; use parity_util_mem::MallocSizeOfExt; use rlp::{encode, decode}; diff --git a/util/journaldb/src/lib.rs b/util/journaldb/src/lib.rs index 515335ce54d..88c9fdcd0fe 100644 --- a/util/journaldb/src/lib.rs +++ b/util/journaldb/src/lib.rs @@ -18,19 +18,17 @@ extern crate parity_util_mem as malloc_size_of; -extern crate parity_bytes as bytes; - use std::{ fmt, str, io, sync::Arc, collections::HashMap, }; -use bytes::Bytes; use ethereum_types::H256; use hash_db::HashDB; use keccak_hasher::KeccakHasher; use kvdb::{self, DBTransaction, DBValue}; +use parity_bytes::Bytes; mod archivedb; mod earlymergedb; From 9ae0307a615dadd6ed8f94af28ff250ccd4f9924 Mon Sep 17 00:00:00 2001 From: debris Date: Wed, 31 Jul 2019 11:10:00 +0200 Subject: [PATCH 7/7] rename parity-util-mem package in Cargo.toml file --- util/journaldb/Cargo.toml | 2 +- util/journaldb/src/archivedb.rs | 2 +- util/journaldb/src/earlymergedb.rs | 2 +- util/journaldb/src/lib.rs | 2 -- util/journaldb/src/overlayrecentdb.rs | 2 +- util/journaldb/src/refcounteddb.rs | 2 +- 6 files changed, 5 insertions(+), 7 deletions(-) diff --git a/util/journaldb/Cargo.toml b/util/journaldb/Cargo.toml index 1d0e0a43e4e..051a519aeb8 100644 --- a/util/journaldb/Cargo.toml +++ b/util/journaldb/Cargo.toml @@ -10,7 +10,7 @@ edition = "2018" parity-bytes = "0.1" ethereum-types = "0.6.0" hash-db = "0.12.4" -parity-util-mem = "0.1" +malloc_size_of = { version = "0.1", package = "parity-util-mem" } keccak-hasher = { path = "../keccak-hasher" } kvdb = "0.1" log = "0.4" diff --git a/util/journaldb/src/archivedb.rs b/util/journaldb/src/archivedb.rs index 9db14779fd9..e0bb6cc6212 100644 --- a/util/journaldb/src/archivedb.rs +++ b/util/journaldb/src/archivedb.rs @@ -26,8 +26,8 @@ use ethereum_types::H256; use hash_db::{HashDB, Prefix}; use keccak_hasher::KeccakHasher; use kvdb::{KeyValueDB, DBTransaction, DBValue}; +use 
malloc_size_of::MallocSizeOfExt; use parity_bytes::Bytes; -use parity_util_mem::MallocSizeOfExt; use rlp::{encode, decode}; use crate::{ diff --git a/util/journaldb/src/earlymergedb.rs b/util/journaldb/src/earlymergedb.rs index deb22cbbeb6..9589212b9ef 100644 --- a/util/journaldb/src/earlymergedb.rs +++ b/util/journaldb/src/earlymergedb.rs @@ -27,8 +27,8 @@ use hash_db::{HashDB, Prefix}; use keccak_hasher::KeccakHasher; use kvdb::{KeyValueDB, DBTransaction, DBValue}; use log::{trace, warn}; +use malloc_size_of::{MallocSizeOf, allocators::new_malloc_size_ops}; use parity_bytes::Bytes; -use parity_util_mem::{MallocSizeOf, allocators::new_malloc_size_ops}; use parking_lot::RwLock; use rlp::{encode, decode}; diff --git a/util/journaldb/src/lib.rs b/util/journaldb/src/lib.rs index 88c9fdcd0fe..0024b3638c0 100644 --- a/util/journaldb/src/lib.rs +++ b/util/journaldb/src/lib.rs @@ -16,8 +16,6 @@ //! `JournalDB` interface and implementation. -extern crate parity_util_mem as malloc_size_of; - use std::{ fmt, str, io, sync::Arc, diff --git a/util/journaldb/src/overlayrecentdb.rs b/util/journaldb/src/overlayrecentdb.rs index 6b873e8ffcc..0c0f92d5eb0 100644 --- a/util/journaldb/src/overlayrecentdb.rs +++ b/util/journaldb/src/overlayrecentdb.rs @@ -28,8 +28,8 @@ use hash_db::{HashDB, Prefix, EMPTY_PREFIX}; use keccak_hasher::KeccakHasher; use kvdb::{KeyValueDB, DBTransaction, DBValue}; use log::trace; +use malloc_size_of::{MallocSizeOf, allocators::new_malloc_size_ops}; use parity_bytes::Bytes; -use parity_util_mem::{MallocSizeOf, allocators::new_malloc_size_ops}; use parking_lot::RwLock; use rlp::{Rlp, RlpStream, encode, decode, DecoderError, Decodable, Encodable}; diff --git a/util/journaldb/src/refcounteddb.rs b/util/journaldb/src/refcounteddb.rs index 8e874630564..884ed1bace6 100644 --- a/util/journaldb/src/refcounteddb.rs +++ b/util/journaldb/src/refcounteddb.rs @@ -27,8 +27,8 @@ use hash_db::{HashDB, Prefix, EMPTY_PREFIX}; use keccak_hasher::KeccakHasher; use kvdb::{KeyValueDB, DBTransaction, DBValue}; use log::trace; +use malloc_size_of::{MallocSizeOf, allocators::new_malloc_size_ops}; use parity_bytes::Bytes; -use parity_util_mem::{MallocSizeOf, allocators::new_malloc_size_ops}; use rlp::{encode, decode}; use crate::{