diff --git a/CHANGELOG.md b/CHANGELOG.md index 861f5fc6..12706caf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Moka Cache — Change Log +## Version 0.9.4 + +### Fixed + +- Fix memory leak after dropping a `sync` or `future` cache ([#177][gh-pull-0177]): + - The leaked memory contained the value part of the cache entries. + + ## Version 0.9.3 ### Added @@ -468,6 +476,7 @@ The minimum supported Rust version (MSRV) is now 1.51.0 (2021-03-25). [gh-issue-0034]: https://github.com/moka-rs/moka/issues/34/ [gh-issue-0031]: https://github.com/moka-rs/moka/issues/31/ +[gh-pull-0177]: https://github.com/moka-rs/moka/pull/177/ [gh-pull-0169]: https://github.com/moka-rs/moka/pull/169/ [gh-pull-0167]: https://github.com/moka-rs/moka/pull/167/ [gh-pull-0165]: https://github.com/moka-rs/moka/pull/165/ diff --git a/Cargo.toml b/Cargo.toml index 61de0430..48f6cff8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "moka" -version = "0.9.3" +version = "0.9.4" edition = "2018" rust-version = "1.51" diff --git a/src/cht/segment.rs b/src/cht/segment.rs index c0a3968a..64e88f17 100644 --- a/src/cht/segment.rs +++ b/src/cht/segment.rs @@ -496,6 +496,8 @@ impl HashMap { impl Drop for HashMap { fn drop(&mut self) { + // Important: Since we are using a dummy guard returned by `unprotected`, + // those `defer_*` functions will be executed immediately. 
let guard = unsafe { &crossbeam_epoch::unprotected() }; atomic::fence(Ordering::Acquire); @@ -514,13 +516,22 @@ impl Drop for HashMap { .iter() .map(|b| b.load(Ordering::Relaxed, guard)) .filter(|p| !p.is_null()) - .filter(|p| next_ptr.is_null() || p.tag() & bucket::TOMBSTONE_TAG == 0) { - // only delete tombstones from the newest bucket array - // the only way this becomes a memory leak is if there was a panic during a rehash, - // in which case I am going to say that running destructors and freeing memory is - // best-effort, and my best effort is to not do it - unsafe { bucket::defer_acquire_destroy(guard, this_bucket_ptr) }; + if bucket::is_tombstone(this_bucket_ptr) { + // Only delete tombstones from the newest bucket array. + // The only way this becomes a memory leak is if there was a + // panic during a rehash, in which case we are going to say + // that running destructors and freeing memory is + // best-effort, and our best effort is to not do it + if next_ptr.is_null() { + // Since this bucket is a tombstone, its value should have + // been dropped already. So, here, we only drop the key. + unsafe { bucket::defer_acquire_destroy(guard, this_bucket_ptr) }; + } + } else { + // This bucket is live. Drop its key and value. (Fixes #176) + unsafe { bucket::defer_destroy_bucket(guard, this_bucket_ptr) }; + } } unsafe { bucket::defer_acquire_destroy(guard, current_ptr) }; @@ -1375,7 +1386,7 @@ mod tests { None ); } - } + } // The map should be dropped here. run_deferred(); @@ -1535,8 +1546,151 @@ mod tests { None ); } + } // The map should be dropped here. 
+ + run_deferred(); + + for this_key_parent in key_parents.iter() { + assert!(this_key_parent.was_dropped()); } + for this_value_parent in value_parents.iter() { + assert!(this_value_parent.was_dropped()); + } + } + + #[test] + fn drop_map_after_concurrent_updates() { + const NUM_THREADS: usize = 64; + const NUM_VALUES_PER_THREAD: usize = 512; + const NUM_VALUES: usize = NUM_THREADS * NUM_VALUES_PER_THREAD; + + let key_parents: Arc> = Arc::new( + std::iter::repeat_with(|| Arc::new(DropNotifier::new())) + .take(NUM_VALUES) + .collect(), + ); + let value_parents: Arc> = Arc::new( + std::iter::repeat_with(|| Arc::new(DropNotifier::new())) + .take(NUM_VALUES) + .collect(), + ); + + { + let map = Arc::new(HashMap::with_capacity(0)); + assert!(map.is_empty()); + assert_eq!(map.len(), 0); + + let barrier = Arc::new(Barrier::new(NUM_THREADS)); + + #[allow(clippy::needless_collect)] + let handles: Vec<_> = (0..NUM_THREADS) + .map(|i| { + let map = Arc::clone(&map); + let barrier = Arc::clone(&barrier); + let key_parents = Arc::clone(&key_parents); + let value_parents = Arc::clone(&value_parents); + + spawn(move || { + barrier.wait(); + + let these_key_parents = &key_parents + [i * NUM_VALUES_PER_THREAD..(i + 1) * NUM_VALUES_PER_THREAD]; + let these_value_parents = &value_parents + [i * NUM_VALUES_PER_THREAD..(i + 1) * NUM_VALUES_PER_THREAD]; + + for (j, (this_key_parent, this_value_parent)) in these_key_parents + .iter() + .zip(these_value_parents.iter()) + .enumerate() + { + let key_value = (i * NUM_VALUES_PER_THREAD + j) as i32; + let hash = map.hash(&key_value); + + assert_eq!( + map.insert_entry_and( + NoisyDropper::new(Arc::clone(this_key_parent), key_value), + hash, + NoisyDropper::new(Arc::clone(this_value_parent), key_value), + |_, _| () + ), + None + ); + } + }) + }) + .collect(); + + for result in handles.into_iter().map(JoinHandle::join) { + assert!(result.is_ok()); + } + + assert!(!map.is_empty()); + assert_eq!(map.len(), NUM_VALUES); + + run_deferred(); + + for 
this_key_parent in key_parents.iter() { + assert!(!this_key_parent.was_dropped()); + } + + for this_value_parent in value_parents.iter() { + assert!(!this_value_parent.was_dropped()); + } + + for i in (0..NUM_VALUES).map(|i| i as i32) { + assert_eq!( + map.get_key_value_and( + map.hash(&i), + |k| k == &i, + |k, v| { + assert_eq!(**k, i); + assert_eq!(*v, i); + } + ), + Some(()) + ); + } + + #[allow(clippy::needless_collect)] + let handles: Vec<_> = (0..NUM_THREADS) + .map(|i| { + let map = Arc::clone(&map); + let barrier = Arc::clone(&barrier); + + spawn(move || { + barrier.wait(); + + for j in 0..NUM_VALUES_PER_THREAD { + let key_value = (i * NUM_VALUES_PER_THREAD + j) as i32; + + if key_value % 4 == 0 { + assert_eq!( + map.remove_entry_if_and( + map.hash(&key_value), + |k| k == &key_value, + |_, _| true, + |k, v| { + assert_eq!(**k, key_value); + assert_eq!(*v, key_value); + } + ), + Some(()) + ); + } + } + }) + }) + .collect(); + + for result in handles.into_iter().map(JoinHandle::join) { + assert!(result.is_ok()); + } + + assert!(!map.is_empty()); + assert_eq!(map.len(), NUM_VALUES / 4 * 3); + } // The map should be dropped here. + run_deferred(); for this_key_parent in key_parents.iter() {