Skip to content

Commit

Permalink
feat(all): fix clippy-1
Browse files Browse the repository at this point in the history
  • Loading branch information
devillove084 committed Mar 9, 2024
1 parent a99c6c0 commit abb453a
Show file tree
Hide file tree
Showing 25 changed files with 244 additions and 265 deletions.
2 changes: 1 addition & 1 deletion src/storage_engine/src/cache/bloom_filter_cache.rs
Original file line number Diff line number Diff line change
Expand Up @@ -206,7 +206,7 @@ mod tests {

let mut rate: f32 = 0.0;
for i in 0..n {
if h.assert_num(i + 1000000000, true, true) {
if h.assert_num(i + 1_000_000_000, true, true) {
rate += 1.0;
}
}
Expand Down
2 changes: 1 addition & 1 deletion src/storage_engine/src/cache/lru_cache.rs
Original file line number Diff line number Diff line change
Expand Up @@ -240,7 +240,7 @@ where
impl<K, V: Clone> Drop for LRUCache<K, V> {
fn drop(&mut self) {
let mut l = self.inner.lock().unwrap();
(*l).table.values_mut().for_each(|e| unsafe {
l.table.values_mut().for_each(|e| unsafe {
ptr::drop_in_place(e.key.as_mut_ptr());
ptr::drop_in_place(e.value.as_mut_ptr());
});
Expand Down
6 changes: 3 additions & 3 deletions src/storage_engine/src/cache/sharded_cache.rs
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ where
V: Sync + Send + Clone,
{
/// Create a new `ShardedCache` with given shards
pub fn new(shards: Vec<C>) -> Self {
#[must_use] pub fn new(shards: Vec<C>) -> Self {
Self {
shards: Arc::new(shards),
_k: PhantomData,
Expand All @@ -38,7 +38,7 @@ where
let mut s = DefaultHasher::new();
let len = self.shards.len();
k.hash(&mut s);
s.finish() as usize % len
usize::try_from(s.finish()).expect("truncate error") % len
}
}

Expand All @@ -60,7 +60,7 @@ where

/// Remove the entry for `key`, delegating to the shard that owns it.
///
/// The owning shard is picked by hashing the key (`find_shard`), so the
/// same key always routes to the same shard.
fn erase(&self, key: &K) {
    let idx = self.find_shard(key);
    self.shards[idx].erase(key);
}

fn total_charge(&self) -> usize {
Expand Down
29 changes: 13 additions & 16 deletions src/storage_engine/src/cache/table_cache.rs
Original file line number Diff line number Diff line change
Expand Up @@ -45,22 +45,19 @@ impl<S: Storage + Clone, C: Comparator + 'static> TableCache<S, C> {
file_number: u64,
file_size: u64,
) -> TemplateResult<Arc<Table<S::F>>> {
match self.cache.get(&file_number) {
Some(v) => Ok(v),
None => {
let filename = generate_filename(&self.db_path, FileType::Table, file_number);
let table_file = self.storage.open(&filename)?;
let table = Table::open(
table_file,
file_number,
file_size,
self.options.clone(),
cmp,
)?;
let value = Arc::new(table);
let _ = self.cache.insert(file_number, value.clone(), 1);
Ok(value)
}
if let Some(v) = self.cache.get(&file_number) { Ok(v) } else {
let filename = generate_filename(&self.db_path, FileType::Table, file_number);
let table_file = self.storage.open(filename)?;
let table = Table::open(
table_file,
file_number,
file_size,
self.options.clone(),
cmp,
)?;
let value = Arc::new(table);
let _ = self.cache.insert(file_number, value.clone(), 1);
Ok(value)
}
}

Expand Down
36 changes: 18 additions & 18 deletions src/storage_engine/src/compaction/compact.rs
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@ impl CompactionInputs {
}

#[inline]
pub fn desc_base_files(&self) -> String {
#[must_use] pub fn desc_base_files(&self) -> String {
self.base
.iter()
.map(|f| f.number.to_string())
Expand All @@ -61,7 +61,7 @@ impl CompactionInputs {
}

#[inline]
pub fn desc_parent_files(&self) -> String {
#[must_use] pub fn desc_parent_files(&self) -> String {
self.parent
.iter()
.map(|f| f.number.to_string())
Expand Down Expand Up @@ -171,15 +171,15 @@ impl<O: File, C: Comparator + 'static> Compaction<O, C> {
// Level-0 files have to be merged together so we generate a merging iterator includes
// iterators for each level 0 file. For other levels, we will make a concatenating
// iterator per level.
let mut level0 = Vec::with_capacity(self.inputs.base.len() + 1);
let mut leveln = Vec::with_capacity(2);
let mut level_0 = Vec::with_capacity(self.inputs.base.len() + 1);
let mut level_n = Vec::with_capacity(2);
if self.level == 0 {
for file in self.inputs.base.iter() {
for file in &self.inputs.base {
debug!(
"new level {} table iter: number {}, file size {}, [{:?} ... {:?}]",
self.level, file.number, file.file_size, file.smallest, file.largest
);
level0.push(table_cache.new_iter(
level_0.push(table_cache.new_iter(
icmp.clone(),
read_options,
file.number,
Expand All @@ -195,7 +195,7 @@ impl<O: File, C: Comparator + 'static> Compaction<O, C> {
}
let origin = LevelFileNumIterator::new(icmp.clone(), self.inputs.base.clone());
let factory = FileIterFactory::new(icmp.clone(), read_options, table_cache.clone());
leveln.push(ConcatenateIterator::new(origin, factory));
level_n.push(ConcatenateIterator::new(origin, factory));
}
if !self.inputs.parent.is_empty() {
for f in &self.inputs.parent {
Expand All @@ -210,10 +210,10 @@ impl<O: File, C: Comparator + 'static> Compaction<O, C> {
}
let origin = LevelFileNumIterator::new(icmp.clone(), self.inputs.parent.clone());
let factory = FileIterFactory::new(icmp.clone(), read_options, table_cache);
leveln.push(ConcatenateIterator::new(origin, factory));
level_n.push(ConcatenateIterator::new(origin, factory));
}

let iter = KMergeIter::new(SSTableIters::new(icmp, level0, leveln));
let iter = KMergeIter::new(SSTableIters::new(icmp, level_0, level_n));
Ok(iter)
}

Expand All @@ -229,7 +229,7 @@ impl<O: File, C: Comparator + 'static> Compaction<O, C> {
) == CmpOrdering::Greater
{
if self.seen_key {
self.overlapped_bytes += self.grand_parents[self.grand_parent_index].file_size
self.overlapped_bytes += self.grand_parents[self.grand_parent_index].file_size;
}
self.grand_parent_index += 1;
}
Expand Down Expand Up @@ -272,17 +272,17 @@ impl<O: File, C: Comparator + 'static> Compaction<O, C> {

/// Apply deletion for current inputs and current output files to the edit
pub fn apply_to_edit(&mut self) {
for f in self.inputs.base.iter() {
self.edit.delete_file(self.level, f.number)
for f in &self.inputs.base {
self.edit.delete_file(self.level, f.number);
}
for f in self.inputs.parent.iter() {
self.edit.delete_file(self.level + 1, f.number)
for f in &self.inputs.parent {
self.edit.delete_file(self.level + 1, f.number);
}
for output in self.outputs.drain(..) {
self.edit
.file_delta
.new_files
.push((self.level + 1, output))
.push((self.level + 1, output));
}
}

Expand Down Expand Up @@ -346,11 +346,11 @@ pub fn total_range<'a, C: Comparator>(
if !next_l_files.is_empty() {
let first = next_l_files.first().unwrap();
if icmp.compare(first.smallest.data(), smallest.data()) == CmpOrdering::Less {
smallest = &first.smallest
smallest = &first.smallest;
}
let last = next_l_files.last().unwrap();
if icmp.compare(last.largest.data(), largest.data()) == CmpOrdering::Greater {
largest = &last.largest
largest = &last.largest;
}
}
(smallest, largest)
/// Aggregated statistics recorded for a single compaction run.
pub struct CompactionStats {
    /// Elapsed time of the compaction, in microseconds
    pub micros: u64,
    /// The data size read by this compaction
    pub bytes_read: u64,
    /// The data size created in new generated `SSTables`
    pub bytes_written: u64,
}
65 changes: 32 additions & 33 deletions src/storage_engine/src/db_impl/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -458,22 +458,22 @@ mod tests {
// Test getting kv from immutable memtable and SSTable
fn test_get_from_immutable_layer() {
    for t in cases(|mut opt| {
        opt.write_buffer_size = 100_000; // Small write buffer so the memtable fills quickly
        opt
    }) {
        t.assert_put_get("foo", "v1");
        // Block `flush()` so the filled memtable stays immutable in memory.
        t.store.delay_data_sync.store(true, Ordering::Release);
        t.put("k1", &"x".repeat(100_000)).unwrap(); // fill memtable
        assert_eq!("v1", t.get("foo", None).unwrap()); // "v1" on immutable table
        t.put("k2", &"y".repeat(100_000)).unwrap(); // trigger compaction
        // Wait for the background compaction to finish.
        thread::sleep(Duration::from_secs(2));
        t.assert_file_num_at_level(2, 1);
        // Try to retrieve key "foo" from level 0 files
        t.assert_get("k1", Some(&"x".repeat(100_000)));
        assert_eq!("v1", t.get("foo", None).unwrap()); // "v1" on SST files
        t.assert_get("k2", Some(&"y".repeat(100_000)));
    }
}

Expand All @@ -491,7 +491,7 @@ mod tests {
// Test look up key with snapshot
fn test_get_with_snapshot() {
for t in default_cases() {
for key in vec![String::from("foo"), "x".repeat(20)] {
for key in [String::from("foo"), "x".repeat(20)] {
t.assert_put_get(&key, "v1");
let s = t.db.snapshot();
t.put(&key, "v2").unwrap();
Expand Down Expand Up @@ -550,7 +550,7 @@ mod tests {

// Test that "get" always retrieve entries from the right sst file
#[test]
fn test_get_level0_ordering() {
fn test_get_level_0_ordering() {
for t in default_cases() {
t.put("bar", "b").unwrap();
t.put("foo", "v1").unwrap();
Expand Down Expand Up @@ -1122,7 +1122,7 @@ mod tests {
t.put(&key(3), &rand_string(10000)).unwrap();
t.put(&key(4), &big1).unwrap();
t.put(&key(5), &rand_string(10000)).unwrap();
t.put(&key(6), &rand_string(300000)).unwrap();
t.put(&key(6), &rand_string(300_000)).unwrap();
t.put(&key(7), &rand_string(10000)).unwrap();
if t.opt.reuse_logs {
t.inner.force_compact_mem_table().unwrap();
Expand All @@ -1132,13 +1132,13 @@ mod tests {
t.assert_approximate_size("", &key(0), 0, 0);
t.assert_approximate_size("", &key(1), 10000, 11000);
t.assert_approximate_size("", &key(2), 20000, 21000);
t.assert_approximate_size("", &key(3), 120000, 121000);
t.assert_approximate_size("", &key(4), 130000, 131000);
t.assert_approximate_size("", &key(5), 230000, 231000);
t.assert_approximate_size("", &key(6), 240000, 241000);
t.assert_approximate_size("", &key(7), 540000, 541000);
t.assert_approximate_size("", &key(8), 550000, 560000);
t.assert_approximate_size(&key(3), &key(5), 110000, 111000);
t.assert_approximate_size("", &key(3), 120_000, 121_000);
t.assert_approximate_size("", &key(4), 130_000, 131_000);
t.assert_approximate_size("", &key(5), 230_000, 231_000);
t.assert_approximate_size("", &key(6), 240_000, 241_000);
t.assert_approximate_size("", &key(7), 540_000, 541_000);
t.assert_approximate_size("", &key(8), 550_000, 560_000);
t.assert_approximate_size(&key(3), &key(5), 110_000, 111_000);
t.compact_range_at(0, None, None).unwrap();
}
}
Expand Down Expand Up @@ -1264,7 +1264,7 @@ mod tests {
}

#[test]
fn test_overlap_in_level0() {
fn test_overlap_in_level_0() {
for t in default_cases() {
// Fill levels 1 and 2 to disable the pushing of new memtables to levels > 0
t.put("100", "v100").unwrap();
Expand Down Expand Up @@ -1644,25 +1644,24 @@ mod tests {
break;
}
}
} else {
match state
.db
.get(ReadOptions::default(), key.to_string().as_bytes())
{
Ok(v) => {
if let Some(value) = v {
let s = String::from_utf8(value).unwrap();
let ss = s.split('.').collect::<Vec<_>>();
assert_eq!(3, ss.len());
assert_eq!(ss[0], key.to_string());
}
}
Err(e) => {
let mut guard = state.rerrs.lock().unwrap();
guard.push(e);
break;
}
match state
.db
.get(ReadOptions::default(), key.to_string().as_bytes())
{
Ok(v) => {
if let Some(value) = v {
let s = String::from_utf8(value).unwrap();
let ss = s.split('.').collect::<Vec<_>>();
assert_eq!(3, ss.len());
assert_eq!(ss[0], key.to_string());
}
}
Err(e) => {
let mut guard = state.rerrs.lock().unwrap();
guard.push(e);
break;
}
}
counter += 1;
}
Expand Down
Loading

0 comments on commit abb453a

Please sign in to comment.