diff --git a/CHANGELOG.md b/CHANGELOG.md index dcc09296..7fc62b1c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,26 @@ # Moka Cache — Change Log +## Version 0.12.5 + +### Added + +- Added support for a plain LRU (Least Recently Used) eviction policy + ([#390][gh-pull-0390]): + - The LRU policy is enabled by calling the `eviction_policy` method of the cache + builder with a policy obtained by `EvictionPolicy::lru` function. + - The default eviction policy remains the TinyLFU (Tiny, Least Frequently Used) + as it maintains better hit rate than LRU for most use cases. TinyLFU combines + LRU eviction policy and popularity-based admission policy. A probabilistic data + structure is used to estimate historical popularity of both hit and missed + keys. (not only the keys currently in the cache.) + - However, some use cases may prefer LRU policy over TinyLFU. An example is + recency biased workload such as streaming data processing. LRU policy can be + used for them to achieve better hit rate. + - Note that we are planning to add an adaptive eviction/admission policy called + Window-TinyLFU in the future. It will adjust the balance between recency and + frequency based on the current workload. + + ## Version 0.12.4 ### Fixed @@ -8,7 +29,7 @@ - `crossbeam-epoch` crate provides an epoch-based memory reclamation scheme for concurrent data structures. It is used by Moka cache to safely drop cached entries while they are still being accessed by other threads. - - `crossbeam-epoch` does the best to reclaim memory (drop the entries evicted + - `crossbeam-epoch` does its best to reclaim memory (drop the entries evicted from the cache) when the epoch is advanced. However, it does not guarantee that memory will be reclaimed immediately after the epoch is advanced. This means that entries can remain in the memory for a while after the cache is dropped. @@ -829,6 +850,7 @@ The minimum supported Rust version (MSRV) is now 1.51.0 (Mar 25, 2021). 
[gh-issue-0034]: https://github.com/moka-rs/moka/issues/34/ [gh-issue-0031]: https://github.com/moka-rs/moka/issues/31/ +[gh-pull-0390]: https://github.com/moka-rs/moka/pull/390/ [gh-pull-0384]: https://github.com/moka-rs/moka/pull/384/ [gh-pull-0382]: https://github.com/moka-rs/moka/pull/382/ [gh-pull-0376]: https://github.com/moka-rs/moka/pull/376/ diff --git a/Cargo.toml b/Cargo.toml index fca4ffab..2280afa6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "moka" -version = "0.12.4" +version = "0.12.5" edition = "2021" # Rust 1.65 was released on Nov 3, 2022. rust-version = "1.65" diff --git a/src/future/base_cache.rs b/src/future/base_cache.rs index fc733003..cd35933a 100644 --- a/src/future/base_cache.rs +++ b/src/future/base_cache.rs @@ -27,7 +27,7 @@ use crate::{ }, future::CancelGuard, notification::{AsyncEvictionListener, RemovalCause}, - policy::ExpirationPolicy, + policy::{EvictionPolicy, EvictionPolicyConfig, ExpirationPolicy}, sync_base::iter::ScanningGet, Entry, Expiry, Policy, PredicateError, }; @@ -167,6 +167,7 @@ where initial_capacity: Option, build_hasher: S, weigher: Option>, + eviction_policy: EvictionPolicy, eviction_listener: Option>, expiration_policy: ExpirationPolicy, invalidator_enabled: bool, @@ -187,6 +188,7 @@ where initial_capacity, build_hasher, weigher, + eviction_policy, eviction_listener, r_rcv, w_rcv, @@ -1041,6 +1043,7 @@ pub(crate) struct Inner { read_op_ch: Receiver>, write_op_ch: Receiver>, maintenance_task_lock: RwLock<()>, + eviction_policy: EvictionPolicyConfig, expiration_policy: ExpirationPolicy, valid_after: AtomicInstant, weigher: Option>, @@ -1191,6 +1194,7 @@ where initial_capacity: Option, build_hasher: S, weigher: Option>, + eviction_policy: EvictionPolicy, eviction_listener: Option>, read_op_ch: Receiver>, write_op_ch: Receiver>, @@ -1247,6 +1251,7 @@ where read_op_ch, write_op_ch, maintenance_task_lock: RwLock::default(), + eviction_policy: eviction_policy.config, expiration_policy, 
valid_after: AtomicInstant::default(), weigher, @@ -1451,7 +1456,9 @@ where .await; } - if self.should_enable_frequency_sketch(&eviction_state.counters) { + if self.eviction_policy == EvictionPolicyConfig::TinyLfu + && self.should_enable_frequency_sketch(&eviction_state.counters) + { self.enable_frequency_sketch(&eviction_state.counters).await; } @@ -1742,15 +1749,21 @@ where } } - let mut candidate = EntrySizeAndFrequency::new(new_weight); - candidate.add_frequency(freq, kh.hash); + // TODO: Refactoring the policy implementations. + // https://github.com/moka-rs/moka/issues/389 // Try to admit the candidate. - // - // NOTE: We need to call `admit` here, instead of a part of the `match` - // expression. Otherwise the future returned from this `handle_upsert` method - // will not be `Send`. - let admission_result = Self::admit(&candidate, &self.cache, deqs, freq); + let admission_result = match &self.eviction_policy { + EvictionPolicyConfig::TinyLfu => { + let mut candidate = EntrySizeAndFrequency::new(new_weight); + candidate.add_frequency(freq, kh.hash); + Self::admit(&candidate, &self.cache, deqs, freq) + } + EvictionPolicyConfig::Lru => AdmissionResult::Admitted { + victim_keys: SmallVec::default(), + }, + }; + match admission_result { AdmissionResult::Admitted { victim_keys } => { // Try to remove the victims from the hash map. 
@@ -2760,7 +2773,7 @@ fn is_expired_by_ttl( #[cfg(test)] mod tests { - use crate::policy::ExpirationPolicy; + use crate::policy::{EvictionPolicy, ExpirationPolicy}; use super::BaseCache; @@ -2779,8 +2792,9 @@ mod tests { None, RandomState::default(), None, + EvictionPolicy::default(), None, - Default::default(), + ExpirationPolicy::default(), false, ); cache.inner.enable_frequency_sketch_for_testing().await; @@ -3127,6 +3141,7 @@ mod tests { None, RandomState::default(), None, + EvictionPolicy::default(), None, ExpirationPolicy::new( Some(Duration::from_secs(TTL)), diff --git a/src/future/builder.rs b/src/future/builder.rs index ca7f52ad..7c6943de 100644 --- a/src/future/builder.rs +++ b/src/future/builder.rs @@ -2,7 +2,7 @@ use super::{Cache, FutureExt}; use crate::{ common::{builder_utils, concurrent::Weigher}, notification::{AsyncEvictionListener, ListenerFuture, RemovalCause}, - policy::ExpirationPolicy, + policy::{EvictionPolicy, ExpirationPolicy}, Expiry, }; @@ -60,6 +60,7 @@ pub struct CacheBuilder { max_capacity: Option, initial_capacity: Option, weigher: Option>, + eviction_policy: EvictionPolicy, eviction_listener: Option>, expiration_policy: ExpirationPolicy, invalidator_enabled: bool, @@ -77,6 +78,7 @@ where max_capacity: None, initial_capacity: None, weigher: None, + eviction_policy: EvictionPolicy::default(), eviction_listener: None, expiration_policy: ExpirationPolicy::default(), invalidator_enabled: false, @@ -116,6 +118,7 @@ where self.initial_capacity, build_hasher, self.weigher, + self.eviction_policy, self.eviction_listener, self.expiration_policy, self.invalidator_enabled, @@ -212,6 +215,7 @@ where self.initial_capacity, hasher, self.weigher, + self.eviction_policy, self.eviction_listener, self.expiration_policy, self.invalidator_enabled, @@ -245,6 +249,19 @@ impl CacheBuilder { } } + /// Sets the eviction (and admission) policy of the cache. + /// + /// The default policy is TinyLFU. 
See [`EvictionPolicy`][eviction-policy] for + /// more details. + /// + /// [eviction-policy]: ../policy/struct.EvictionPolicy.html + pub fn eviction_policy(self, policy: EvictionPolicy) -> Self { + Self { + eviction_policy: policy, + ..self + } + } + /// Sets the weigher closure to the cache. /// /// The closure should take `&K` and `&V` as the arguments and returns a `u32` @@ -276,8 +293,8 @@ impl CacheBuilder { /// /// It is very important to make the listener closure not to panic. Otherwise, /// the cache will stop calling the listener after a panic. This is an intended - /// behavior because the cache cannot know whether is is memory safe or not to - /// call the panicked lister again. + /// behavior because the cache cannot know whether it is memory safe or not to + /// call the panicked listener again. /// /// [removal-cause]: ../notification/enum.RemovalCause.html /// [example]: ./struct.Cache.html#per-entry-expiration-policy @@ -316,8 +333,8 @@ impl CacheBuilder { /// /// It is very important to make the listener closure not to panic. Otherwise, /// the cache will stop calling the listener after a panic. This is an intended - /// behavior because the cache cannot know whether is is memory safe or not to - /// call the panicked lister again. + /// behavior because the cache cannot know whether it is memory safe or not to + /// call the panicked listener again. 
/// /// [removal-cause]: ../notification/enum.RemovalCause.html /// [listener-future]: ../notification/type.ListenerFuture.html diff --git a/src/future/cache.rs b/src/future/cache.rs index 523f49cd..16312246 100644 --- a/src/future/cache.rs +++ b/src/future/cache.rs @@ -8,7 +8,7 @@ use crate::{ common::concurrent::Weigher, notification::AsyncEvictionListener, ops::compute::{self, CompResult}, - policy::ExpirationPolicy, + policy::{EvictionPolicy, ExpirationPolicy}, Entry, Policy, PredicateError, }; @@ -150,8 +150,8 @@ use std::sync::atomic::{AtomicBool, Ordering}; /// /// - Create a clone of the cache by calling its `clone` method and pass it to other /// task. -/// - If you are using a web application framework such as Actix Web or Axum, -/// you can store a cache in Actix Web's [`web::Data`][actix-web-data] or Axum's +/// - If you are using a web application framework such as Actix Web or Axum, you can +/// store a cache in Actix Web's [`web::Data`][actix-web-data] or Axum's /// [shared state][axum-state-extractor], and access it from each request handler. /// - Wrap the cache by a `sync::OnceCell` or `sync::Lazy` from /// [once_cell][once-cell-crate] create, and set it to a `static` variable. @@ -302,8 +302,8 @@ use std::sync::atomic::{AtomicBool, Ordering}; /// updated, one of these methods is called. These methods return an /// `Option`, which is used as the expiration duration of the entry. /// -/// `Expiry` trait provides the default implementations of these methods, so you -/// will implement only the methods you want to customize. +/// `Expiry` trait provides the default implementations of these methods, so you will +/// implement only the methods you want to customize. 
/// /// [exp-create]: ../trait.Expiry.html#method.expire_after_create /// [exp-read]: ../trait.Expiry.html#method.expire_after_read @@ -446,7 +446,7 @@ use std::sync::atomic::{AtomicBool, Ordering}; /// The following example demonstrates how to use an eviction listener with /// time-to-live expiration to manage the lifecycle of temporary files on a /// filesystem. The cache stores the paths of the files, and when one of them has -/// expired, the eviction lister will be called with the path, so it can remove the +/// expired, the eviction listener will be called with the path, so it can remove the /// file from the filesystem. /// /// ```rust @@ -537,7 +537,7 @@ use std::sync::atomic::{AtomicBool, Ordering}; /// let file_mgr1 = Arc::clone(&file_mgr); /// let rt = tokio::runtime::Handle::current(); /// -/// // Create an eviction lister closure. +/// // Create an eviction listener closure. /// let eviction_listener = move |k, v: PathBuf, cause| -> ListenerFuture { /// println!("\n== An entry has been evicted. k: {k:?}, v: {v:?}, cause: {cause:?}"); /// let file_mgr2 = Arc::clone(&file_mgr1); @@ -594,7 +594,7 @@ use std::sync::atomic::{AtomicBool, Ordering}; /// } /// /// // Sleep for five seconds. While sleeping, the cache entry for key "user1" -/// // will be expired and evicted, so the eviction lister will be called to +/// // will be expired and evicted, so the eviction listener will be called to /// // remove the file. /// tokio::time::sleep(Duration::from_secs(5)).await; /// @@ -609,7 +609,7 @@ use std::sync::atomic::{AtomicBool, Ordering}; /// It is very important to make an eviction listener closure not to panic. /// Otherwise, the cache will stop calling the listener after a panic. This is an /// intended behavior because the cache cannot know whether it is memory safe or not -/// to call the panicked lister again. +/// to call the panicked listener again. /// /// When a listener panics, the cache will swallow the panic and disable the /// listener. 
If you want to know when a listener panics and the reason of the panic, @@ -787,6 +787,7 @@ where None, build_hasher, None, + EvictionPolicy::default(), None, ExpirationPolicy::default(), false, @@ -816,6 +817,7 @@ where initial_capacity: Option, build_hasher: S, weigher: Option>, + eviction_policy: EvictionPolicy, eviction_listener: Option>, expiration_policy: ExpirationPolicy, invalidator_enabled: bool, @@ -827,6 +829,7 @@ where initial_capacity, build_hasher.clone(), weigher, + eviction_policy, eviction_listener, expiration_policy, invalidator_enabled, @@ -2115,7 +2118,7 @@ mod tests { future::FutureExt, notification::{ListenerFuture, RemovalCause}, ops::compute, - policy::test_utils::ExpiryCallCounters, + policy::{test_utils::ExpiryCallCounters, EvictionPolicy}, Expiry, }; @@ -2313,6 +2316,107 @@ mod tests { assert!(cache.key_locks_map_is_empty()); } + #[tokio::test] + async fn basic_lru_single_thread() { + // The following `Vec`s will hold actual and expected notifications. + let actual = Arc::new(Mutex::new(Vec::new())); + let mut expected = Vec::new(); + + // Create an eviction listener. + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| -> ListenerFuture { + let a2 = Arc::clone(&a1); + async move { + a2.lock().await.push((k, v, cause)); + } + .boxed() + }; + + // Create a cache with the eviction listener. + let mut cache = Cache::builder() + .max_capacity(3) + .eviction_policy(EvictionPolicy::lru()) + .async_eviction_listener(listener) + .build(); + cache.reconfigure_for_testing().await; + + // Make the cache exterior immutable. 
+ let cache = cache; + + cache.insert("a", "alice").await; + cache.insert("b", "bob").await; + assert_eq!(cache.get(&"a").await, Some("alice")); + assert!(cache.contains_key(&"a")); + assert!(cache.contains_key(&"b")); + assert_eq!(cache.get(&"b").await, Some("bob")); + cache.run_pending_tasks().await; + // a -> b + + cache.insert("c", "cindy").await; + assert_eq!(cache.get(&"c").await, Some("cindy")); + assert!(cache.contains_key(&"c")); + cache.run_pending_tasks().await; + // a -> b -> c + + assert!(cache.contains_key(&"a")); + assert_eq!(cache.get(&"a").await, Some("alice")); + assert_eq!(cache.get(&"b").await, Some("bob")); + assert!(cache.contains_key(&"b")); + cache.run_pending_tasks().await; + // c -> a -> b + + // "d" should be admitted because the cache uses the LRU strategy. + cache.insert("d", "david").await; + // "c" is the LRU and should have been evicted. + expected.push((Arc::new("c"), "cindy", RemovalCause::Size)); + cache.run_pending_tasks().await; + + assert_eq!(cache.get(&"a").await, Some("alice")); + assert_eq!(cache.get(&"b").await, Some("bob")); + assert_eq!(cache.get(&"c").await, None); + assert_eq!(cache.get(&"d").await, Some("david")); + assert!(cache.contains_key(&"a")); + assert!(cache.contains_key(&"b")); + assert!(!cache.contains_key(&"c")); + assert!(cache.contains_key(&"d")); + cache.run_pending_tasks().await; + // a -> b -> d + + cache.invalidate(&"b").await; + expected.push((Arc::new("b"), "bob", RemovalCause::Explicit)); + cache.run_pending_tasks().await; + // a -> d + assert_eq!(cache.get(&"b").await, None); + assert!(!cache.contains_key(&"b")); + + assert!(cache.remove(&"b").await.is_none()); + assert_eq!(cache.remove(&"d").await, Some("david")); + expected.push((Arc::new("d"), "david", RemovalCause::Explicit)); + cache.run_pending_tasks().await; + // a + assert_eq!(cache.get(&"d").await, None); + assert!(!cache.contains_key(&"d")); + + cache.insert("e", "emily").await; + cache.insert("f", "frank").await; + // "a" should be evicted
because it is the LRU. + cache.insert("g", "gina").await; + expected.push((Arc::new("a"), "alice", RemovalCause::Size)); + cache.run_pending_tasks().await; + // e -> f -> g + assert_eq!(cache.get(&"a").await, None); + assert_eq!(cache.get(&"e").await, Some("emily")); + assert_eq!(cache.get(&"f").await, Some("frank")); + assert_eq!(cache.get(&"g").await, Some("gina")); + assert!(!cache.contains_key(&"a")); + assert!(cache.contains_key(&"e")); + assert!(cache.contains_key(&"f")); + assert!(cache.contains_key(&"g")); + + verify_notification_vec(&cache, actual, &expected).await; + assert!(cache.key_locks_map_is_empty()); + } + #[tokio::test] async fn size_aware_eviction() { let weigher = |_k: &&str, v: &(&str, u32)| v.1; diff --git a/src/lib.rs b/src/lib.rs index c56f4c69..4bd4a800 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -114,7 +114,7 @@ pub(crate) mod common; pub mod ops; #[cfg(any(feature = "sync", feature = "future"))] -pub(crate) mod policy; +pub mod policy; #[cfg(any(feature = "sync", feature = "future"))] pub(crate) mod sync_base; diff --git a/src/policy.rs b/src/policy.rs index 2856df22..edc65bb1 100644 --- a/src/policy.rs +++ b/src/policy.rs @@ -1,4 +1,5 @@ use std::{ + fmt, sync::Arc, time::{Duration, Instant}, }; @@ -58,6 +59,84 @@ impl Policy { } } +/// The eviction (and admission) policy of a cache. +/// +/// When the cache is full, the eviction/admission policy is used to determine which +/// items should be admitted to the cache and which cached items should be evicted. +/// The choice of a policy will directly affect the performance (hit rate) of the +/// cache. +/// +/// The following policies are available: +/// +/// - **TinyLFU** (default): +/// - Suitable for most workloads. +/// - TinyLFU combines the LRU eviction policy and an admission policy based on the +/// historical popularity of keys. +/// - Note that it tracks not only the keys currently in the cache, but all hit and +/// missed keys. 
The data structure used to _estimate_ the popularity of keys is +/// a modified Count-Min Sketch, which has a very low memory footprint (thus the +/// name "tiny"). +/// - **LRU**: +/// - Suitable for some workloads with strong recency bias, such as streaming data +/// processing. +/// +/// LFU stands for Least Frequently Used. LRU stands for Least Recently Used. +/// +/// Use the associated function [`EvictionPolicy::tiny_lfu`](#method.tiny_lfu) or +/// [`EvictionPolicy::lru`](#method.lru) to obtain an instance of `EvictionPolicy`. +#[derive(Clone, Default)] +pub struct EvictionPolicy { + pub(crate) config: EvictionPolicyConfig, +} + +impl EvictionPolicy { + /// Returns the TinyLFU policy, which is suitable for most workloads. + /// + /// TinyLFU is a combination of the LRU eviction policy and the admission policy + /// based on the historical popularity of keys. + /// + /// Note that it tracks not only the keys currently in the cache, but all hit and + /// missed keys. The data structure used to _estimate_ the popularity of keys is + /// a modified Count-Min Sketch, which has a very low memory footprint (thus the + /// name "tiny"). + pub fn tiny_lfu() -> Self { + Self { + config: EvictionPolicyConfig::TinyLfu, + } + } + + /// Returns the LRU policy. + /// + /// Suitable for some workloads with strong recency bias, such as streaming data + /// processing. + pub fn lru() -> Self { + Self { + config: EvictionPolicyConfig::Lru, + } + } +} + +impl fmt::Debug for EvictionPolicy { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.config { + EvictionPolicyConfig::TinyLfu => write!(f, "EvictionPolicy::TinyLfu"), + EvictionPolicyConfig::Lru => write!(f, "EvictionPolicy::Lru"), + } + } +} + +#[derive(Clone, Debug, PartialEq, Eq)] +pub(crate) enum EvictionPolicyConfig { + TinyLfu, + Lru, +} + +impl Default for EvictionPolicyConfig { + fn default() -> Self { + Self::TinyLfu + } +} + /// Calculates when cache entries expire.
A single expiration time is retained on /// each entry so that the lifetime of an entry may be extended or reduced by /// subsequent evaluations. diff --git a/src/sync/builder.rs b/src/sync/builder.rs index 0309e8a0..cd26dd23 100644 --- a/src/sync/builder.rs +++ b/src/sync/builder.rs @@ -2,7 +2,7 @@ use super::{Cache, SegmentedCache}; use crate::{ common::{builder_utils, concurrent::Weigher}, notification::{EvictionListener, RemovalCause}, - policy::ExpirationPolicy, + policy::{EvictionPolicy, ExpirationPolicy}, Expiry, }; @@ -53,6 +53,7 @@ pub struct CacheBuilder { initial_capacity: Option, num_segments: Option, weigher: Option>, + eviction_policy: EvictionPolicy, eviction_listener: Option>, expiration_policy: ExpirationPolicy, invalidator_enabled: bool, @@ -72,6 +73,7 @@ where num_segments: None, weigher: None, eviction_listener: None, + eviction_policy: EvictionPolicy::default(), expiration_policy: ExpirationPolicy::default(), invalidator_enabled: false, cache_type: PhantomData, @@ -110,6 +112,7 @@ where initial_capacity: self.initial_capacity, num_segments: Some(num_segments), weigher: self.weigher, + eviction_policy: self.eviction_policy, eviction_listener: self.eviction_listener, expiration_policy: self.expiration_policy, invalidator_enabled: self.invalidator_enabled, @@ -137,6 +140,7 @@ where self.initial_capacity, build_hasher, self.weigher, + self.eviction_policy, self.eviction_listener, self.expiration_policy, self.invalidator_enabled, @@ -223,6 +227,7 @@ where self.initial_capacity, hasher, self.weigher, + self.eviction_policy, self.eviction_listener, self.expiration_policy, self.invalidator_enabled, @@ -256,6 +261,7 @@ where self.num_segments.unwrap(), build_hasher, self.weigher, + self.eviction_policy, self.eviction_listener, self.expiration_policy, self.invalidator_enabled, @@ -344,6 +350,7 @@ where self.num_segments.unwrap(), hasher, self.weigher, + self.eviction_policy, self.eviction_listener, self.expiration_policy, self.invalidator_enabled, @@ 
-377,6 +384,19 @@ impl CacheBuilder { } } + /// Sets the eviction (and admission) policy of the cache. + /// + /// The default policy is TinyLFU. See [`EvictionPolicy`][eviction-policy] for + /// more details. + /// + /// [eviction-policy]: ../policy/struct.EvictionPolicy.html + pub fn eviction_policy(self, policy: EvictionPolicy) -> Self { + Self { + eviction_policy: policy, + ..self + } + } + /// Sets the weigher closure to the cache. /// /// The closure should take `&K` and `&V` as the arguments and returns a `u32` @@ -397,8 +417,8 @@ impl CacheBuilder { /// /// It is very important to make the listener closure not to panic. Otherwise, /// the cache will stop calling the listener after a panic. This is an intended - /// behavior because the cache cannot know whether is is memory safe or not to - /// call the panicked lister again. + /// behavior because the cache cannot know whether it is memory safe or not to + /// call the panicked listener again. /// /// [removal-cause]: ../notification/enum.RemovalCause.html pub fn eviction_listener( diff --git a/src/sync/cache.rs b/src/sync/cache.rs index 58d67164..9e58e3ed 100644 --- a/src/sync/cache.rs +++ b/src/sync/cache.rs @@ -11,7 +11,7 @@ use crate::{ }, notification::EvictionListener, ops::compute::{self, CompResult}, - policy::ExpirationPolicy, + policy::{EvictionPolicy, ExpirationPolicy}, sync::{Iter, PredicateId}, sync_base::{ base_cache::{BaseCache, HouseKeeperArc}, @@ -266,8 +266,8 @@ use std::{ /// updated, one of these methods is called. These methods return an /// `Option`, which is used as the expiration duration of the entry. /// -/// `Expiry` trait provides the default implementations of these methods, so you -/// will implement only the methods you want to customize. +/// `Expiry` trait provides the default implementations of these methods, so you will +/// implement only the methods you want to customize. 
/// /// [exp-create]: ../trait.Expiry.html#method.expire_after_create /// [exp-read]: ../trait.Expiry.html#method.expire_after_read @@ -393,7 +393,7 @@ use std::{ /// The following example demonstrates how to use an eviction listener with /// time-to-live expiration to manage the lifecycle of temporary files on a /// filesystem. The cache stores the paths of the files, and when one of them has -/// expired, the eviction lister will be called with the path, so it can remove the +/// expired, the eviction listener will be called with the path, so it can remove the /// file from the filesystem. /// /// ```rust @@ -478,7 +478,7 @@ use std::{ /// /// let file_mgr1 = Arc::clone(&file_mgr); /// -/// // Create an eviction lister closure. +/// // Create an eviction listener closure. /// let eviction_listener = move |k, v: PathBuf, cause| { /// // Try to remove the data file at the path `v`. /// println!("\n== An entry has been evicted. k: {k:?}, v: {v:?}, cause: {cause:?}"); @@ -536,7 +536,7 @@ use std::{ /// } /// /// // Sleep for five seconds. While sleeping, the cache entry for key "user1" -/// // will be expired and evicted, so the eviction lister will be called to +/// // will be expired and evicted, so the eviction listener will be called to /// // remove the file. /// std::thread::sleep(Duration::from_secs(5)); /// @@ -551,7 +551,7 @@ use std::{ /// It is very important to make an eviction listener closure not to panic. /// Otherwise, the cache will stop calling the listener after a panic. This is an /// intended behavior because the cache cannot know whether it is memory safe or not -/// to call the panicked lister again. +/// to call the panicked listener again. /// /// When a listener panics, the cache will swallow the panic and disable the /// listener. 
If you want to know when a listener panics and the reason of the panic, @@ -707,6 +707,7 @@ where None, build_hasher, None, + EvictionPolicy::default(), None, ExpirationPolicy::default(), false, @@ -736,6 +737,7 @@ where initial_capacity: Option, build_hasher: S, weigher: Option>, + eviction_policy: EvictionPolicy, eviction_listener: Option>, expiration_policy: ExpirationPolicy, invalidator_enabled: bool, @@ -747,6 +749,7 @@ where initial_capacity, build_hasher.clone(), weigher, + eviction_policy, eviction_listener, expiration_policy, invalidator_enabled, @@ -1904,7 +1907,9 @@ where mod tests { use super::Cache; use crate::{ - common::time::Clock, notification::RemovalCause, policy::test_utils::ExpiryCallCounters, + common::time::Clock, + notification::RemovalCause, + policy::{test_utils::ExpiryCallCounters, EvictionPolicy}, Expiry, }; @@ -2019,6 +2024,101 @@ mod tests { assert!(cache.key_locks_map_is_empty()); } + #[test] + fn basic_lru_single_thread() { + // The following `Vec`s will hold actual and expected notifications. + let actual = Arc::new(Mutex::new(Vec::new())); + let mut expected = Vec::new(); + + // Create an eviction listener. + let a1 = Arc::clone(&actual); + let listener = move |k, v, cause| a1.lock().push((k, v, cause)); + + // Create a cache with the eviction listener. + let mut cache = Cache::builder() + .max_capacity(3) + .eviction_policy(EvictionPolicy::lru()) + .eviction_listener(listener) + .build(); + cache.reconfigure_for_testing(); + + // Make the cache exterior immutable. 
+ let cache = cache; + + cache.insert("a", "alice"); + cache.insert("b", "bob"); + assert_eq!(cache.get(&"a"), Some("alice")); + assert!(cache.contains_key(&"a")); + assert!(cache.contains_key(&"b")); + assert_eq!(cache.get(&"b"), Some("bob")); + cache.run_pending_tasks(); + // a -> b + + cache.insert("c", "cindy"); + assert_eq!(cache.get(&"c"), Some("cindy")); + assert!(cache.contains_key(&"c")); + cache.run_pending_tasks(); + // a -> b -> c + + assert!(cache.contains_key(&"a")); + assert_eq!(cache.get(&"a"), Some("alice")); + assert_eq!(cache.get(&"b"), Some("bob")); + assert!(cache.contains_key(&"b")); + cache.run_pending_tasks(); + // c -> a -> b + + // "d" should be admitted because the cache uses the LRU strategy. + cache.insert("d", "david"); + // "c" is the LRU and should have been evicted. + expected.push((Arc::new("c"), "cindy", RemovalCause::Size)); + cache.run_pending_tasks(); + + assert_eq!(cache.get(&"a"), Some("alice")); + assert_eq!(cache.get(&"b"), Some("bob")); + assert_eq!(cache.get(&"c"), None); + assert_eq!(cache.get(&"d"), Some("david")); + assert!(cache.contains_key(&"a")); + assert!(cache.contains_key(&"b")); + assert!(!cache.contains_key(&"c")); + assert!(cache.contains_key(&"d")); + cache.run_pending_tasks(); + // a -> b -> d + + cache.invalidate(&"b"); + expected.push((Arc::new("b"), "bob", RemovalCause::Explicit)); + cache.run_pending_tasks(); + // a -> d + assert_eq!(cache.get(&"b"), None); + assert!(!cache.contains_key(&"b")); + + assert!(cache.remove(&"b").is_none()); + assert_eq!(cache.remove(&"d"), Some("david")); + expected.push((Arc::new("d"), "david", RemovalCause::Explicit)); + cache.run_pending_tasks(); + // a + assert_eq!(cache.get(&"d"), None); + assert!(!cache.contains_key(&"d")); + + cache.insert("e", "emily"); + cache.insert("f", "frank"); + // "a" should be evicted because it is the LRU.
+ cache.insert("g", "gina"); + expected.push((Arc::new("a"), "alice", RemovalCause::Size)); + cache.run_pending_tasks(); + // e -> f -> g + assert_eq!(cache.get(&"a"), None); + assert_eq!(cache.get(&"e"), Some("emily")); + assert_eq!(cache.get(&"f"), Some("frank")); + assert_eq!(cache.get(&"g"), Some("gina")); + assert!(!cache.contains_key(&"a")); + assert!(cache.contains_key(&"e")); + assert!(cache.contains_key(&"f")); + assert!(cache.contains_key(&"g")); + + verify_notification_vec(&cache, actual, &expected); + assert!(cache.key_locks_map_is_empty()); + } + #[test] fn size_aware_eviction() { let weigher = |_k: &&str, v: &(&str, u32)| v.1; diff --git a/src/sync/segment.rs b/src/sync/segment.rs index 53251ae9..fb3a9cf0 100644 --- a/src/sync/segment.rs +++ b/src/sync/segment.rs @@ -2,7 +2,7 @@ use super::{cache::Cache, CacheBuilder, OwnedKeyEntrySelector, RefKeyEntrySelect use crate::{ common::concurrent::Weigher, notification::EvictionListener, - policy::ExpirationPolicy, + policy::{EvictionPolicy, ExpirationPolicy}, sync_base::iter::{Iter, ScanningGet}, Entry, Policy, PredicateError, }; @@ -102,6 +102,7 @@ where num_segments, build_hasher, None, + EvictionPolicy::default(), None, ExpirationPolicy::default(), false, @@ -207,6 +208,7 @@ where num_segments: usize, build_hasher: S, weigher: Option>, + eviction_policy: EvictionPolicy, eviction_listener: Option>, expiration_policy: ExpirationPolicy, invalidator_enabled: bool, @@ -219,6 +221,7 @@ where num_segments, build_hasher, weigher, + eviction_policy, eviction_listener, expiration_policy, invalidator_enabled, @@ -729,6 +732,7 @@ where num_segments: usize, build_hasher: S, weigher: Option>, + eviction_policy: EvictionPolicy, eviction_listener: Option>, expiration_policy: ExpirationPolicy, invalidator_enabled: bool, @@ -751,6 +755,7 @@ where seg_init_capacity, build_hasher.clone(), weigher.as_ref().map(Arc::clone), + eviction_policy.clone(), eviction_listener.as_ref().map(Arc::clone), expiration_policy.clone(), 
invalidator_enabled, diff --git a/src/sync_base/base_cache.rs b/src/sync_base/base_cache.rs index 304083dc..ab0b3d66 100644 --- a/src/sync_base/base_cache.rs +++ b/src/sync_base/base_cache.rs @@ -26,7 +26,7 @@ use crate::{ CacheRegion, }, notification::{notifier::RemovalNotifier, EvictionListener, RemovalCause}, - policy::ExpirationPolicy, + policy::{EvictionPolicy, EvictionPolicyConfig, ExpirationPolicy}, Entry, Expiry, Policy, PredicateError, }; @@ -142,6 +142,7 @@ where initial_capacity: Option, build_hasher: S, weigher: Option>, + eviction_policy: EvictionPolicy, eviction_listener: Option>, expiration_policy: ExpirationPolicy, invalidator_enabled: bool, @@ -161,6 +162,7 @@ where initial_capacity, build_hasher, weigher, + eviction_policy, eviction_listener, r_rcv, w_rcv, @@ -901,6 +903,7 @@ pub(crate) struct Inner { frequency_sketch_enabled: AtomicBool, read_op_ch: Receiver>, write_op_ch: Receiver>, + eviction_policy: EvictionPolicyConfig, expiration_policy: ExpirationPolicy, valid_after: AtomicInstant, weigher: Option>, @@ -1038,6 +1041,7 @@ where initial_capacity: Option, build_hasher: S, weigher: Option>, + eviction_policy: EvictionPolicy, eviction_listener: Option>, read_op_ch: Receiver>, write_op_ch: Receiver>, @@ -1094,6 +1098,7 @@ where frequency_sketch_enabled: AtomicBool::default(), read_op_ch, write_op_ch, + eviction_policy: eviction_policy.config, expiration_policy, valid_after: AtomicInstant::default(), weigher, @@ -1285,7 +1290,9 @@ where self.apply_writes(&mut deqs, &mut timer_wheel, w_len, &mut eviction_state); } - if self.should_enable_frequency_sketch(&eviction_state.counters) { + if self.eviction_policy == EvictionPolicyConfig::TinyLfu + && self.should_enable_frequency_sketch(&eviction_state.counters) + { self.enable_frequency_sketch(&eviction_state.counters); } @@ -1554,11 +1561,22 @@ where } } - let mut candidate = EntrySizeAndFrequency::new(new_weight); - candidate.add_frequency(freq, kh.hash); + // TODO: Refactoring the policy 
implementations. + // https://github.com/moka-rs/moka/issues/389 // Try to admit the candidate. - match Self::admit(&candidate, &self.cache, deqs, freq) { + let admission_result = match &self.eviction_policy { + EvictionPolicyConfig::TinyLfu => { + let mut candidate = EntrySizeAndFrequency::new(new_weight); + candidate.add_frequency(freq, kh.hash); + Self::admit(&candidate, &self.cache, deqs, freq) + } + EvictionPolicyConfig::Lru => AdmissionResult::Admitted { + victim_keys: SmallVec::default(), + }, + }; + + match admission_result { AdmissionResult::Admitted { victim_keys } => { // Try to remove the victims from the hash map. for (vic_kh, vic_la) in victim_keys { @@ -2497,7 +2515,7 @@ fn is_expired_by_ttl( #[cfg(test)] mod tests { - use crate::policy::ExpirationPolicy; + use crate::policy::{EvictionPolicy, ExpirationPolicy}; use super::BaseCache; @@ -2516,8 +2534,9 @@ mod tests { None, RandomState::default(), None, + EvictionPolicy::default(), None, - Default::default(), + ExpirationPolicy::default(), false, ); cache.inner.enable_frequency_sketch_for_testing(); @@ -2861,6 +2880,7 @@ mod tests { None, RandomState::default(), None, + EvictionPolicy::default(), None, ExpirationPolicy::new( Some(Duration::from_secs(TTL)),