From 781988cc113c772d1898ebeac316f1afb976a799 Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Sun, 31 Dec 2023 10:39:50 +0800 Subject: [PATCH 01/16] Compute API - Add entry `and_upsert_with` method to `future::Cache` --- Cargo.toml | 8 ++ examples/append_value_async.rs | 60 +++++++++++++ examples/counter_async.rs | 37 ++++++++ src/future.rs | 3 + src/future/cache.rs | 44 ++++++++- src/future/entry_selector.rs | 10 +++ src/future/value_initializer.rs | 152 +++++++++++++++++++++++++++++++- src/lib.rs | 4 + src/ops.rs | 28 ++++++ 9 files changed, 342 insertions(+), 4 deletions(-) create mode 100644 examples/append_value_async.rs create mode 100644 examples/counter_async.rs create mode 100644 src/ops.rs diff --git a/Cargo.toml b/Cargo.toml index d89d6402..c21baad0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -106,6 +106,10 @@ rustdoc-args = ["--cfg", "docsrs"] # Examples +[[example]] +name = "append_value_async" +required-features = ["future"] + [[example]] name = "basics_async" required-features = ["future"] @@ -118,6 +122,10 @@ required-features = ["sync"] name = "cascading_drop_async" required-features = ["future"] +[[example]] +name = "counter_async" +required-features = ["future"] + [[example]] name = "eviction_listener_sync" required-features = ["sync"] diff --git a/examples/append_value_async.rs b/examples/append_value_async.rs new file mode 100644 index 00000000..5126974c --- /dev/null +++ b/examples/append_value_async.rs @@ -0,0 +1,60 @@ +use std::sync::Arc; + +use moka::{future::Cache, Entry}; +use tokio::sync::RwLock; + +/// This example demonstrates how to append an `i32` value to a cached `Vec` +/// value. It uses the `and_upsert_with` method of `Cache`. +#[tokio::main] +async fn main() { + // We want to store a raw value `Vec` for each `String` key. We are going to + // append `i32` values to the `Vec` in the cache. + // + // Note that we have to wrap the `Vec` in an `Arc>`. We need the `Arc`, + // an atomic reference counted shared pointer, because `and_upsert_with` method + // of `Cache` passes a _clone_ of the value to our closure, instead of passing a + // `&mut` reference. We do not want to clone the `Vec` every time we append a + // value to it, so we wrap it in an `Arc`. Then we need the `RwLock` because we + // mutate the `Vec` when we append a value to it. + // + // The reason that `and_upsert_with` cannot pass a `&mut Vec<_>` to the closure + // is because the internal concurrent hash table of `Cache` is a lock free data + // structure and does not use any mutexes. So it cannot guarantee: (1) the `&mut + // Vec<_>` is unique, and (2) it is not accessed concurrently by other threads. + let cache: Cache>>> = Cache::new(100); + + let key = "key".to_string(); + + let entry = append_to_cached_vec(&cache, &key, 1).await; + // assert_eq!(performed_op, PerformedOp::Inserted); + assert_eq!(*entry.into_value().read().await, &[1]); + + let entry = append_to_cached_vec(&cache, &key, 2).await; + // assert_eq!(performed_op, PerformedOp::Updated); + assert_eq!(*entry.into_value().read().await, &[1, 2]); + + let entry = append_to_cached_vec(&cache, &key, 3).await; + assert_eq!(*entry.into_value().read().await, &[1, 2, 3]); +} + +async fn append_to_cached_vec( + cache: &Cache>>>, + key: &str, + value: i32, +) -> Entry>>> { + cache + .entry_by_ref(key) + .and_upsert_with(|maybe_entry| async { + if let Some(entry) = maybe_entry { + // The entry exists, append the value to the Vec. 
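+ // Note: `into_value()` hands us a clone of the `Arc`, so only the cheap
+ // pointer is cloned; pushing through the `RwLock` below mutates the same
+ // shared `Vec` that the cache holds.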
+ let v = entry.into_value(); + v.write().await.push(value); + v + } else { + // The entry does not exist, insert a new Vec containing + // the value. + Arc::new(RwLock::new(vec![value])) + } + }) + .await +} diff --git a/examples/counter_async.rs b/examples/counter_async.rs new file mode 100644 index 00000000..a1733a2a --- /dev/null +++ b/examples/counter_async.rs @@ -0,0 +1,37 @@ +use moka::{future::Cache, Entry}; + +/// This example demonstrates how to increment a cached `u64` counter. It uses the +/// `and_upsert_with` method of `Cache`. +#[tokio::main] +async fn main() { + let cache: Cache = Cache::new(100); + let key = "key".to_string(); + + let entry = inclement_counter(&cache, &key).await; + // assert_eq!(performed_op, PerformedOp::Inserted); + assert_eq!(entry.into_value(), 1); + + let entry = inclement_counter(&cache, &key).await; + // assert_eq!(performed_op, PerformedOp::Updated); + assert_eq!(entry.into_value(), 2); + + let entry = inclement_counter(&cache, &key).await; + assert_eq!(entry.into_value(), 3); +} + +async fn inclement_counter(cache: &Cache, key: &str) -> Entry { + cache + .entry_by_ref(key) + .and_upsert_with(|maybe_entry| { + let v = if let Some(entry) = maybe_entry { + // The entry exists, increment the value by 1. + entry.into_value().saturating_add(1) + } else { + // The entry does not exist, insert a new value of 1. + 1 + }; + // Return a Future that is resolved to `v` immediately. + std::future::ready(v) + }) + .await +} diff --git a/src/future.rs b/src/future.rs index d15cd4ef..483b5785 100644 --- a/src/future.rs +++ b/src/future.rs @@ -38,6 +38,9 @@ pub(crate) type PredicateIdStr<'a> = &'a str; // Empty struct to be used in InitResult::InitErr to represent the Option None. pub(crate) struct OptionallyNone; +// Empty struct to be used in InitResult::InitErr to represent the Compute None. 
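+// It is also used as the waiter `TypeId` for the `and_compute_with` family of
+// methods, keeping their waiters separate from the `get_with` and `try_get_with`
+// ones (see `ValueInitializer::type_id_for_compute_with`).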
+pub(crate) struct ComputeNone; + impl FutureExt for T where T: Future {} pub trait FutureExt: Future { diff --git a/src/future/cache.rs b/src/future/cache.rs index 09ddc8ba..e3b440cd 100644 --- a/src/future/cache.rs +++ b/src/future/cache.rs @@ -1,6 +1,6 @@ use super::{ base_cache::BaseCache, - value_initializer::{GetOrInsert, InitResult, ValueInitializer}, + value_initializer::{ComputeResult, GetOrInsert, InitResult, ValueInitializer}, CacheBuilder, CancelGuard, Iter, OwnedKeyEntrySelector, PredicateId, RefKeyEntrySelector, WriteOp, }; @@ -1830,6 +1830,41 @@ where cancel_guard.clear(); } + pub(crate) async fn upsert_with_hash_by_ref_and_fun( + &self, + key: &Q, + hash: u64, + f: F, + ) -> Entry + where + K: Borrow, + Q: ToOwned + Hash + Eq + ?Sized, + F: FnOnce(Option>) -> Fut, + Fut: Future, + { + let key = Arc::new(key.to_owned()); + let type_id = ValueInitializer::::type_id_for_compute_with(); + let post_init = ValueInitializer::::post_init_for_compute_with; + + match self + .value_initializer + .try_compute(&key, hash, type_id, self, f, post_init) + .await + { + ComputeResult::Inserted(value) => { + crossbeam_epoch::pin().flush(); + Entry::new(Some(key), value, true) + } + ComputeResult::Updated(value) => { + crossbeam_epoch::pin().flush(); + Entry::new(Some(key), value, false) + } + ComputeResult::Nop(_) | ComputeResult::Removed(_) | ComputeResult::EvalErr(_) => { + unreachable!() + } + } + } + async fn invalidate_with_hash(&self, key: &Q, hash: u64, need_value: bool) -> Option where K: Borrow, @@ -1966,6 +2001,13 @@ where .map(Entry::into_value) } + async fn get_entry_without_recording(&self, key: &Arc, hash: u64) -> Option> { + let ignore_if = None as Option<&mut fn(&V) -> bool>; + self.base + .get_with_hash(key, hash, ignore_if, true, false) + .await + } + async fn insert(&self, key: Arc, hash: u64, value: V) { self.insert_with_hash(key.clone(), hash, value).await; } diff --git a/src/future/entry_selector.rs b/src/future/entry_selector.rs index d4954b34..94957c71 100644 --- a/src/future/entry_selector.rs +++ b/src/future/entry_selector.rs @@ -704,4 +704,14 @@ where .get_or_try_insert_with_hash_by_ref_and_fun(self.ref_key, self.hash, init, true) .await } + + pub async fn and_upsert_with(self, f: F) -> Entry + where + F: FnOnce(Option>) -> Fut, + Fut: Future, + { + self.cache + .upsert_with_hash_by_ref_and_fun(self.ref_key, self.hash, f) + .await + } } diff --git a/src/future/value_initializer.rs b/src/future/value_initializer.rs index 45a6f0b2..54850d53 100644 --- a/src/future/value_initializer.rs +++ b/src/future/value_initializer.rs @@ -3,6 +3,7 @@ use async_trait::async_trait; use futures_util::FutureExt; use std::{ any::{Any, TypeId}, + fmt, future::Future, hash::{BuildHasher, Hash}, pin::Pin, @@ -10,7 +11,9 @@ use std::{ }; use triomphe::Arc as TrioArc; -use super::OptionallyNone; +use crate::{ops::compute, Entry}; + +use super::{ComputeNone, OptionallyNone}; const WAITER_MAP_NUM_SEGMENTS: usize = 64; @@ -26,6 +29,10 @@ pub(crate) trait GetOrInsert { V: 'static, I: for<'i> FnMut(&'i V) -> bool + Send; + async fn get_entry_without_recording(&self, key: &Arc, hash: u64) -> Option> + where + V: 'static; + async fn insert(&self, key: Arc, hash: u64, value: V); } @@ -37,15 +44,37 @@ pub(crate) enum InitResult { InitErr(Arc), } +pub(crate) enum ComputeResult { + Inserted(V), + Updated(V), + #[allow(unused)] + Removed(V), + Nop(Option), + EvalErr(E), +} + enum WaiterValue { Computing, Ready(Result), + ReadyNone, // https://github.com/moka-rs/moka/issues/43 InitFuturePanicked, // 
https://github.com/moka-rs/moka/issues/59 EnclosingFutureAborted, } +impl fmt::Debug for WaiterValue { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + WaiterValue::Computing => write!(f, "Computing"), + WaiterValue::Ready(_) => write!(f, "Ready"), + WaiterValue::ReadyNone => write!(f, "ReadyNone"), + WaiterValue::InitFuturePanicked => write!(f, "InitFuturePanicked"), + WaiterValue::EnclosingFutureAborted => write!(f, "EnclosingFutureAborted"), + } + } +} + type Waiter = TrioArc>>; type WaiterMap = crate::cht::SegmentedHashMap<(Arc, TypeId), Waiter, S>; @@ -203,8 +232,8 @@ where continue; } // Unexpected state. - WaiterValue::Computing => panic!( - "Got unexpected state `Computing` after resolving `init` future. \ + s @ (WaiterValue::Computing | WaiterValue::ReadyNone) => panic!( + "Got unexpected state `{s:?}` after resolving `init` future. \ This might be a bug in Moka" ), } @@ -254,6 +283,113 @@ where // The lock will be unlocked here. } + /// # Panics + /// Panics if the `init` future has been panicked. + pub(crate) async fn try_compute<'a, C, F, Fut, O, E>( + &'a self, + c_key: &Arc, + c_hash: u64, + type_id: TypeId, + cache: &C, // Future to initialize a new value. + f: F, + post_init: fn(O) -> Result, E>, + ) -> ComputeResult + where + C: GetOrInsert + Send + 'a, + F: FnOnce(Option>) -> Fut, + Fut: Future + 'a, + E: Send + Sync + 'static, + { + use std::panic::{resume_unwind, AssertUnwindSafe}; + use ComputeResult::{EvalErr, Inserted, Nop, Updated}; + + let (w_key, w_hash) = waiter_key_hash(&self.waiters, c_key, type_id); + + let waiter = TrioArc::new(RwLock::new(WaiterValue::Computing)); + // NOTE: We have to acquire a write lock before `try_insert_waiter`, + // so that any concurrent attempt will get our lock and wait on it. + let lock = waiter.write().await; + + loop { + let Some(existing_waiter) = + try_insert_waiter(&self.waiters, w_key.clone(), w_hash, &waiter) + else { + break; + }; + + // Somebody else's waiter already exists, so wait for its result to become available. + let waiter_result = existing_waiter.read().await; + match &*waiter_result { + // Unexpected state. + WaiterValue::Computing => panic!( + "Got unexpected state `Computing` after resolving `init` future. \ + This might be a bug in Moka" + ), + _ => { + // Retry from the beginning. + continue; + } + } + } + + // Our waiter was inserted. + + // Create a guard. This will ensure to remove our waiter when the + // enclosing future has been aborted: + // https://github.com/moka-rs/moka/issues/59 + let waiter_guard = WaiterGuard::new(w_key, w_hash, &self.waiters, lock); + + // Get the current value. + let maybe_entry = cache.get_entry_without_recording(c_key, c_hash).await; + // TODO: Avoid cloning if possible. + let maybe_value = maybe_entry.as_ref().map(|ent| ent.value().clone()); + let entry_existed = maybe_entry.is_some(); + + // Let's evaluate the `f` closure and get a future. Catching panic is safe + // here as we will not evaluate the closure again. + let fut = match std::panic::catch_unwind(AssertUnwindSafe(|| f(maybe_entry))) { + // Evaluated. + Ok(fut) => fut, + // Panicked. + Err(payload) => { + waiter_guard.set_waiter_value(WaiterValue::InitFuturePanicked); + resume_unwind(payload); + } + }; + + // Resolve the `fut` future. Catching panic is safe here as we will not + // resolve the future again. + match AssertUnwindSafe(fut).catch_unwind().await { + // Resolved. 
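+ // Unlike the `get_with` family, the computed value is not shared with
+ // concurrent waiters; the waiter is only marked `ReadyNone`, so concurrent
+ // callers are unblocked and then retry their own computation.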
+ Ok(op) => { + waiter_guard.set_waiter_value(WaiterValue::ReadyNone); + match post_init(op) { + Ok(op) => match op { + compute::Op::Nop => Nop(maybe_value), + compute::Op::Put(value) => { + cache.insert(Arc::clone(c_key), c_hash, value.clone()).await; + if entry_existed { + Updated(value) + } else { + Inserted(value) + } + } + compute::Op::Remove => { + todo!() + } + }, + Err(e) => EvalErr(e), + } + } + // Panicked. + Err(payload) => { + waiter_guard.set_waiter_value(WaiterValue::InitFuturePanicked); + resume_unwind(payload); + } + } + // The lock will be unlocked here. + } + /// The `post_init` function for the `get_with` method of cache. pub(crate) fn post_init_for_get_with(value: V) -> Result { Ok(value) @@ -275,6 +411,11 @@ where result } + /// The `post_init` function for the `and_compute_with` method of cache. + pub(crate) fn post_init_for_compute_with(op: V) -> Result, ()> { + Ok(compute::Op::Put(op)) + } + /// Returns the `type_id` for `get_with` method of cache. pub(crate) fn type_id_for_get_with() -> TypeId { // NOTE: We use a regular function here instead of a const fn because TypeId @@ -291,6 +432,11 @@ where pub(crate) fn type_id_for_try_get_with() -> TypeId { TypeId::of::() } + + /// Returns the `type_id` for `and_compute_with` method of cache. + pub(crate) fn type_id_for_compute_with() -> TypeId { + TypeId::of::() + } } #[inline] diff --git a/src/lib.rs b/src/lib.rs index e05e09cb..c56f4c69 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -109,6 +109,10 @@ pub(crate) mod cht; #[cfg(any(feature = "sync", feature = "future"))] pub(crate) mod common; +#[cfg(any(feature = "sync", feature = "future"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "sync", feature = "future"))))] +pub mod ops; + #[cfg(any(feature = "sync", feature = "future"))] pub(crate) mod policy; diff --git a/src/ops.rs b/src/ops.rs new file mode 100644 index 00000000..fee88f1d --- /dev/null +++ b/src/ops.rs @@ -0,0 +1,28 @@ +pub mod compute { + + /// Instructs the `and_compute` method how to modify the cache entry. + pub enum Op { + /// No-op. Do not modify the cached entry. + Nop, + /// Insert or replace the value of the cached entry. + Put(V), + /// Remove the cached entry. + Remove, + } + + /// Will be returned from `and_compute_with` and similar methods to indicate what + /// kind of operation was performed. + pub enum PerformedOp { + /// The entry did not exist, or already existed but was not modified. + Nop, + /// The entry did not exist and inserted. + Inserted, + /// The entry already existed and its value was updated. + Updated, + /// The entry existed and was removed. + /// + /// Note: If `and_compute_with` tried to remove a not-exiting entry, `Nop` + /// will be returned. 
+ Remove, + } +} From 5e409b5023c04f7b41f1a816f072724eec2e5c46 Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Sun, 31 Dec 2023 16:51:39 +0800 Subject: [PATCH 02/16] Compute API - Add `is_updated` method to `Entry` --- examples/append_value_async.rs | 8 ++++++-- examples/counter_async.rs | 8 ++++++-- src/common/entry.rs | 10 +++++++++- src/future/base_cache.rs | 2 +- src/future/cache.rs | 27 ++++++++++++--------------- src/future/entry_selector.rs | 14 +++++++++++++- src/sync/cache.rs | 16 ++++++++-------- src/sync_base/base_cache.rs | 2 +- 8 files changed, 56 insertions(+), 31 deletions(-) diff --git a/examples/append_value_async.rs b/examples/append_value_async.rs index 5126974c..6b1f0314 100644 --- a/examples/append_value_async.rs +++ b/examples/append_value_async.rs @@ -26,14 +26,18 @@ async fn main() { let key = "key".to_string(); let entry = append_to_cached_vec(&cache, &key, 1).await; - // assert_eq!(performed_op, PerformedOp::Inserted); + assert!(entry.is_fresh()); + assert!(!entry.is_updated()); assert_eq!(*entry.into_value().read().await, &[1]); let entry = append_to_cached_vec(&cache, &key, 2).await; - // assert_eq!(performed_op, PerformedOp::Updated); + assert!(entry.is_fresh()); + assert!(entry.is_updated()); assert_eq!(*entry.into_value().read().await, &[1, 2]); let entry = append_to_cached_vec(&cache, &key, 3).await; + assert!(entry.is_fresh()); + assert!(entry.is_updated()); assert_eq!(*entry.into_value().read().await, &[1, 2, 3]); } diff --git a/examples/counter_async.rs b/examples/counter_async.rs index a1733a2a..55ef880d 100644 --- a/examples/counter_async.rs +++ b/examples/counter_async.rs @@ -8,14 +8,18 @@ async fn main() { let key = "key".to_string(); let entry = inclement_counter(&cache, &key).await; - // assert_eq!(performed_op, PerformedOp::Inserted); + assert!(entry.is_fresh()); + assert!(!entry.is_updated()); assert_eq!(entry.into_value(), 1); let entry = inclement_counter(&cache, &key).await; - // assert_eq!(performed_op, PerformedOp::Updated); + assert!(entry.is_fresh()); + assert!(entry.is_updated()); assert_eq!(entry.into_value(), 2); let entry = inclement_counter(&cache, &key).await; + assert!(entry.is_fresh()); + assert!(entry.is_updated()); assert_eq!(entry.into_value(), 3); } diff --git a/src/common/entry.rs b/src/common/entry.rs index 7a762090..99d40a4d 100644 --- a/src/common/entry.rs +++ b/src/common/entry.rs @@ -21,6 +21,7 @@ pub struct Entry { key: Option>, value: V, is_fresh: bool, + is_updated: bool, } impl Debug for Entry @@ -33,16 +34,18 @@ where .field("key", self.key()) .field("value", &self.value) .field("is_fresh", &self.is_fresh) + .field("is_updated", &self.is_updated) .finish() } } impl Entry { - pub(crate) fn new(key: Option>, value: V, is_fresh: bool) -> Self { + pub(crate) fn new(key: Option>, value: V, is_fresh: bool, is_updated: bool) -> Self { Self { key, value, is_fresh, + is_updated, } } @@ -72,4 +75,9 @@ impl Entry { pub fn is_fresh(&self) -> bool { self.is_fresh } + + /// Returns `true` if the value in this `Entry` replaced an old cached value. 
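+ ///
+ /// This is set to `true` only by the `and_upsert_with` and `and_compute_with`
+ /// family of methods when the key already existed in the cache; entries
+ /// returned by the other methods always report `false`.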
+ pub fn is_updated(&self) -> bool { + self.is_updated + } } diff --git a/src/future/base_cache.rs b/src/future/base_cache.rs index c5f5233c..41e94aac 100644 --- a/src/future/base_cache.rs +++ b/src/future/base_cache.rs @@ -326,7 +326,7 @@ where entry.set_last_accessed(now); let maybe_key = if need_key { Some(Arc::clone(k)) } else { None }; - let ent = Entry::new(maybe_key, entry.value.clone(), false); + let ent = Entry::new(maybe_key, entry.value.clone(), false, false); let maybe_op = if record_read { Some(ReadOp::Hit { value_entry: TrioArc::clone(entry), diff --git a/src/future/cache.rs b/src/future/cache.rs index e3b440cd..d3d807c5 100644 --- a/src/future/cache.rs +++ b/src/future/cache.rs @@ -1575,9 +1575,9 @@ where { InitResult::Initialized(v) => { crossbeam_epoch::pin().flush(); - Entry::new(k, v, true) + Entry::new(k, v, true, false) } - InitResult::ReadExisting(v) => Entry::new(k, v, false), + InitResult::ReadExisting(v) => Entry::new(k, v, false, false), InitResult::InitErr(_) => unreachable!(), } } @@ -1598,7 +1598,7 @@ where let value = init(); self.insert_with_hash(Arc::clone(&key), hash, value.clone()) .await; - Entry::new(Some(key), value, true) + Entry::new(Some(key), value, true, false) } } } @@ -1624,7 +1624,7 @@ where let value = init(); self.insert_with_hash(Arc::clone(&key), hash, value.clone()) .await; - Entry::new(Some(key), value, true) + Entry::new(Some(key), value, true, false) } } } @@ -1702,9 +1702,9 @@ where { InitResult::Initialized(v) => { crossbeam_epoch::pin().flush(); - Some(Entry::new(k, v, true)) + Some(Entry::new(k, v, true, false)) } - InitResult::ReadExisting(v) => Some(Entry::new(k, v, false)), + InitResult::ReadExisting(v) => Some(Entry::new(k, v, false, false)), InitResult::InitErr(_) => None, } } @@ -1784,9 +1784,9 @@ where { InitResult::Initialized(v) => { crossbeam_epoch::pin().flush(); - Ok(Entry::new(k, v, true)) + Ok(Entry::new(k, v, true, false)) } - InitResult::ReadExisting(v) => Ok(Entry::new(k, v, false)), + InitResult::ReadExisting(v) => Ok(Entry::new(k, v, false, false)), InitResult::InitErr(e) => { crossbeam_epoch::pin().flush(); Err(e) @@ -1830,19 +1830,16 @@ where cancel_guard.clear(); } - pub(crate) async fn upsert_with_hash_by_ref_and_fun( + pub(crate) async fn upsert_with_hash_and_fun( &self, - key: &Q, + key: Arc, hash: u64, f: F, ) -> Entry where - K: Borrow, - Q: ToOwned + Hash + Eq + ?Sized, F: FnOnce(Option>) -> Fut, Fut: Future, { - let key = Arc::new(key.to_owned()); let type_id = ValueInitializer::::type_id_for_compute_with(); let post_init = ValueInitializer::::post_init_for_compute_with; @@ -1853,11 +1850,11 @@ where { ComputeResult::Inserted(value) => { crossbeam_epoch::pin().flush(); - Entry::new(Some(key), value, true) + Entry::new(Some(key), value, true, false) } ComputeResult::Updated(value) => { crossbeam_epoch::pin().flush(); - Entry::new(Some(key), value, false) + Entry::new(Some(key), value, true, true) } ComputeResult::Nop(_) | ComputeResult::Removed(_) | ComputeResult::EvalErr(_) => { unreachable!() diff --git a/src/future/entry_selector.rs b/src/future/entry_selector.rs index 94957c71..596ea20b 100644 --- a/src/future/entry_selector.rs +++ b/src/future/entry_selector.rs @@ -353,6 +353,17 @@ where .get_or_try_insert_with_hash_and_fun(key, self.hash, init, true) .await } + + pub async fn and_upsert_with(self, f: F) -> Entry + where + F: FnOnce(Option>) -> Fut, + Fut: Future, + { + let key = Arc::new(self.owned_key); + self.cache + .upsert_with_hash_and_fun(key, self.hash, f) + .await + } } /// Provides advanced 
methods to select or insert an entry of the cache. @@ -710,8 +721,9 @@ where F: FnOnce(Option>) -> Fut, Fut: Future, { + let key = Arc::new(self.ref_key.to_owned()); self.cache - .upsert_with_hash_by_ref_and_fun(self.ref_key, self.hash, f) + .upsert_with_hash_and_fun(key, self.hash, f) .await } } diff --git a/src/sync/cache.rs b/src/sync/cache.rs index 3ab1677d..54986a38 100644 --- a/src/sync/cache.rs +++ b/src/sync/cache.rs @@ -1047,9 +1047,9 @@ where { InitResult::Initialized(v) => { crossbeam_epoch::pin().flush(); - Entry::new(k, v, true) + Entry::new(k, v, true, false) } - InitResult::ReadExisting(v) => Entry::new(k, v, false), + InitResult::ReadExisting(v) => Entry::new(k, v, false, false), InitResult::InitErr(_) => unreachable!(), } } @@ -1065,7 +1065,7 @@ where None => { let value = init(); self.insert_with_hash(Arc::clone(&key), hash, value.clone()); - Entry::new(Some(key), value, true) + Entry::new(Some(key), value, true, false) } } } @@ -1086,7 +1086,7 @@ where let key = Arc::new(key.to_owned()); let value = init(); self.insert_with_hash(Arc::clone(&key), hash, value.clone()); - Entry::new(Some(key), value, true) + Entry::new(Some(key), value, true, false) } } } @@ -1271,9 +1271,9 @@ where { InitResult::Initialized(v) => { crossbeam_epoch::pin().flush(); - Some(Entry::new(k, v, true)) + Some(Entry::new(k, v, true, false)) } - InitResult::ReadExisting(v) => Some(Entry::new(k, v, false)), + InitResult::ReadExisting(v) => Some(Entry::new(k, v, false, false)), InitResult::InitErr(_) => { crossbeam_epoch::pin().flush(); None @@ -1465,9 +1465,9 @@ where { InitResult::Initialized(v) => { crossbeam_epoch::pin().flush(); - Ok(Entry::new(k, v, true)) + Ok(Entry::new(k, v, true, false)) } - InitResult::ReadExisting(v) => Ok(Entry::new(k, v, false)), + InitResult::ReadExisting(v) => Ok(Entry::new(k, v, false, false)), InitResult::InitErr(e) => { crossbeam_epoch::pin().flush(); Err(e) diff --git a/src/sync_base/base_cache.rs b/src/sync_base/base_cache.rs index ad41b056..0d4571e1 100644 --- a/src/sync_base/base_cache.rs +++ b/src/sync_base/base_cache.rs @@ -357,7 +357,7 @@ where is_expiry_modified, }; read_recorder(op, now); - Some(Entry::new(maybe_key, v, false)) + Some(Entry::new(maybe_key, v, false, false)) } else { read_recorder(ReadOp::Miss(hash), now); None From 39e930145ccdfec2c5ea0d1b6418bca60803e481 Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Sun, 31 Dec 2023 17:53:58 +0800 Subject: [PATCH 03/16] rustfmt --- src/future/entry_selector.rs | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/src/future/entry_selector.rs b/src/future/entry_selector.rs index 596ea20b..475b1c95 100644 --- a/src/future/entry_selector.rs +++ b/src/future/entry_selector.rs @@ -360,9 +360,7 @@ where Fut: Future, { let key = Arc::new(self.owned_key); - self.cache - .upsert_with_hash_and_fun(key, self.hash, f) - .await + self.cache.upsert_with_hash_and_fun(key, self.hash, f).await } } @@ -722,8 +720,6 @@ where Fut: Future, { let key = Arc::new(self.ref_key.to_owned()); - self.cache - .upsert_with_hash_and_fun(key, self.hash, f) - .await + self.cache.upsert_with_hash_and_fun(key, self.hash, f).await } } From 64908f67a285ac73ef7c4d57d17792b0f5f2c5b4 Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Mon, 1 Jan 2024 17:21:10 +0800 Subject: [PATCH 04/16] Compute API - Add entry `and_compute_with` method to `future::Cache` --- examples/bounded_counter_async.rs | 79 +++++++++++++++++++++++++++++++ examples/counter_async.rs | 6 +-- src/future/cache.rs | 54 +++++++++++++++++++-- 
src/future/entry_selector.rs | 24 +++++++++- src/future/value_initializer.rs | 21 ++++++-- src/ops.rs | 6 ++- 6 files changed, 176 insertions(+), 14 deletions(-) create mode 100644 examples/bounded_counter_async.rs diff --git a/examples/bounded_counter_async.rs b/examples/bounded_counter_async.rs new file mode 100644 index 00000000..cae64465 --- /dev/null +++ b/examples/bounded_counter_async.rs @@ -0,0 +1,79 @@ +use moka::{ + future::Cache, + ops::compute::{self, PerformedOp}, + Entry, +}; + +/// This example demonstrates how to increment a cached `u64` counter. It uses the +/// `and_upsert_with` method of `Cache`. +#[tokio::main] +async fn main() { + let cache: Cache = Cache::new(100); + let key = "key".to_string(); + + let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key).await; + assert_eq!(performed_op, PerformedOp::Inserted); + + let entry = maybe_entry.expect("An entry should be returned"); + assert!(entry.is_fresh()); + assert!(!entry.is_updated()); + assert_eq!(entry.into_value(), 1); + + let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key).await; + assert_eq!(performed_op, PerformedOp::Updated); + + let entry = maybe_entry.expect("An entry should be returned"); + assert!(entry.is_fresh()); + assert!(entry.is_updated()); + assert_eq!(entry.into_value(), 2); + + let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key).await; + assert_eq!(performed_op, PerformedOp::Removed); + + let entry = maybe_entry.expect("An entry should be returned"); + assert!(!entry.is_fresh()); + assert!(!entry.is_updated()); + assert_eq!(entry.into_value(), 2); + + assert!(!cache.contains_key(&key)); + + let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key).await; + assert_eq!(performed_op, PerformedOp::Inserted); + + let entry = maybe_entry.expect("An entry should be returned"); + assert!(entry.is_fresh()); + assert!(!entry.is_updated()); + assert_eq!(entry.into_value(), 1); +} + +/// Increment a cached `u64` counter. If the counter is greater than or equal to 2, +/// remove it. +async fn inclement_or_remove_counter( + cache: &Cache, + key: &str, +) -> (Option>, compute::PerformedOp) { + // - If the counter does not exist, insert a new value of 1. + // - If the counter is less than 2, increment it by 1. + // - If the counter is greater than or equal to 2, remove it. + cache + .entry_by_ref(key) + .and_compute_with(|maybe_entry| { + let op = if let Some(entry) = maybe_entry { + // The entry exists. + let counter = entry.into_value(); + if counter < 2 { + // Increment the counter by 1. + compute::Op::Put(counter.saturating_add(1)) + } else { + // Remove the entry. + compute::Op::Remove + } + } else { + // The entry does not exist, insert a new value of 1. + compute::Op::Put(1) + }; + // Return a Future that is resolved to `op` immediately. 
+ std::future::ready(op) + }) + .await +} diff --git a/examples/counter_async.rs b/examples/counter_async.rs index 55ef880d..f62d33c1 100644 --- a/examples/counter_async.rs +++ b/examples/counter_async.rs @@ -27,15 +27,15 @@ async fn inclement_counter(cache: &Cache, key: &str) -> Entry, { let type_id = ValueInitializer::::type_id_for_compute_with(); - let post_init = ValueInitializer::::post_init_for_compute_with; + let post_init = ValueInitializer::::post_init_for_upsert_with; match self .value_initializer @@ -1862,6 +1862,50 @@ where } } + pub(crate) async fn compute_with_hash_and_fun( + &self, + key: Arc, + hash: u64, + f: F, + ) -> (Option>, compute::PerformedOp) + where + F: FnOnce(Option>) -> Fut, + Fut: Future>, + { + let type_id = ValueInitializer::::type_id_for_compute_with(); + let post_init = ValueInitializer::::post_init_for_compute_with; + + match self + .value_initializer + .try_compute(&key, hash, type_id, self, f, post_init) + .await + { + ComputeResult::Nop(maybe_value) => { + let maybe_entry = + maybe_value.map(|value| Entry::new(Some(key), value, false, false)); + (maybe_entry, compute::PerformedOp::Nop) + } + ComputeResult::Inserted(value) => { + crossbeam_epoch::pin().flush(); + let entry = Entry::new(Some(key), value, true, false); + (Some(entry), compute::PerformedOp::Inserted) + } + ComputeResult::Updated(value) => { + crossbeam_epoch::pin().flush(); + let entry = Entry::new(Some(key), value, true, true); + (Some(entry), compute::PerformedOp::Updated) + } + ComputeResult::Removed(value) => { + crossbeam_epoch::pin().flush(); + let entry = Entry::new(Some(key), value, false, false); + (Some(entry), compute::PerformedOp::Removed) + } + ComputeResult::EvalErr(_) => { + unreachable!() + } + } + } + async fn invalidate_with_hash(&self, key: &Q, hash: u64, need_value: bool) -> Option where K: Borrow, @@ -2008,6 +2052,10 @@ where async fn insert(&self, key: Arc, hash: u64, value: V) { self.insert_with_hash(key.clone(), hash, value).await; } + + async fn remove(&self, key: &Arc, hash: u64) -> Option { + self.invalidate_with_hash(key, hash, true).await + } } // For unit tests. diff --git a/src/future/entry_selector.rs b/src/future/entry_selector.rs index 475b1c95..c1c1017e 100644 --- a/src/future/entry_selector.rs +++ b/src/future/entry_selector.rs @@ -1,4 +1,4 @@ -use crate::Entry; +use crate::{ops::compute, Entry}; use super::Cache; @@ -362,6 +362,17 @@ where let key = Arc::new(self.owned_key); self.cache.upsert_with_hash_and_fun(key, self.hash, f).await } + + pub async fn and_compute_with(self, f: F) -> (Option>, compute::PerformedOp) + where + F: FnOnce(Option>) -> Fut, + Fut: Future>, + { + let key = Arc::new(self.owned_key); + self.cache + .compute_with_hash_and_fun(key, self.hash, f) + .await + } } /// Provides advanced methods to select or insert an entry of the cache. 
@@ -722,4 +733,15 @@ where let key = Arc::new(self.ref_key.to_owned()); self.cache.upsert_with_hash_and_fun(key, self.hash, f).await } + + pub async fn and_compute_with(self, f: F) -> (Option>, compute::PerformedOp) + where + F: FnOnce(Option>) -> Fut, + Fut: Future>, + { + let key = Arc::new(self.ref_key.to_owned()); + self.cache + .compute_with_hash_and_fun(key, self.hash, f) + .await + } } diff --git a/src/future/value_initializer.rs b/src/future/value_initializer.rs index 54850d53..7141b4ad 100644 --- a/src/future/value_initializer.rs +++ b/src/future/value_initializer.rs @@ -34,6 +34,8 @@ pub(crate) trait GetOrInsert { V: 'static; async fn insert(&self, key: Arc, hash: u64, value: V); + + async fn remove(&self, key: &Arc, hash: u64) -> Option; } type ErrorObject = Arc; @@ -47,7 +49,6 @@ pub(crate) enum InitResult { pub(crate) enum ComputeResult { Inserted(V), Updated(V), - #[allow(unused)] Removed(V), Nop(Option), EvalErr(E), @@ -301,7 +302,7 @@ where E: Send + Sync + 'static, { use std::panic::{resume_unwind, AssertUnwindSafe}; - use ComputeResult::{EvalErr, Inserted, Nop, Updated}; + use ComputeResult::{EvalErr, Inserted, Nop, Removed, Updated}; let (w_key, w_hash) = waiter_key_hash(&self.waiters, c_key, type_id); @@ -375,7 +376,12 @@ where } } compute::Op::Remove => { - todo!() + let maybe_prev_v = cache.remove(c_key, c_hash).await; + if let Some(prev_v) = maybe_prev_v { + Removed(prev_v) + } else { + Nop(None) + } } }, Err(e) => EvalErr(e), @@ -411,9 +417,14 @@ where result } + /// The `post_init` function for the `and_upsert_with` method of cache. + pub(crate) fn post_init_for_upsert_with(value: V) -> Result, ()> { + Ok(compute::Op::Put(value)) + } + /// The `post_init` function for the `and_compute_with` method of cache. - pub(crate) fn post_init_for_compute_with(op: V) -> Result, ()> { - Ok(compute::Op::Put(op)) + pub(crate) fn post_init_for_compute_with(op: compute::Op) -> Result, ()> { + Ok(op) } /// Returns the `type_id` for `get_with` method of cache. diff --git a/src/ops.rs b/src/ops.rs index fee88f1d..7481ac31 100644 --- a/src/ops.rs +++ b/src/ops.rs @@ -1,6 +1,7 @@ pub mod compute { /// Instructs the `and_compute` method how to modify the cache entry. + #[derive(Debug, Clone, PartialEq, Eq)] pub enum Op { /// No-op. Do not modify the cached entry. Nop, @@ -12,10 +13,11 @@ pub mod compute { /// Will be returned from `and_compute_with` and similar methods to indicate what /// kind of operation was performed. + #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum PerformedOp { /// The entry did not exist, or already existed but was not modified. Nop, - /// The entry did not exist and inserted. + /// The entry did not exist and was inserted. Inserted, /// The entry already existed and its value was updated. Updated, @@ -23,6 +25,6 @@ pub mod compute { /// /// Note: If `and_compute_with` tried to remove a not-exiting entry, `Nop` /// will be returned. 
- Remove, + Removed, } } From c6ce7d50caaff5f57f3b6cd98bb96d837573834a Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Mon, 1 Jan 2024 17:32:25 +0800 Subject: [PATCH 05/16] Compute API - Add entry `and_compute_with` method to `future::Cache` --- Cargo.toml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/Cargo.toml b/Cargo.toml index c21baad0..349fd7e0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -118,6 +118,10 @@ required-features = ["future"] name = "basics_sync" required-features = ["sync"] +[[example]] +name = "bounded_counter_async" +required-features = ["future"] + [[example]] name = "cascading_drop_async" required-features = ["future"] From b936e004a9d8d8051d6cc1b1820b92803069c017 Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Mon, 1 Jan 2024 19:37:52 +0800 Subject: [PATCH 06/16] Compute API - Add entry `and_try_compute_with` method to `future::Cache` --- Cargo.toml | 6 +- examples/append_value_async.rs | 5 +- examples/bounded_counter_async.rs | 34 +++++---- examples/counter_async.rs | 13 ++-- examples/try_append_value_async.rs | 109 +++++++++++++++++++++++++++++ src/future/cache.rs | 43 ++++++++++++ src/future/entry_selector.rs | 30 ++++++++ src/future/value_initializer.rs | 10 +++ 8 files changed, 223 insertions(+), 27 deletions(-) create mode 100644 examples/try_append_value_async.rs diff --git a/Cargo.toml b/Cargo.toml index 349fd7e0..08464a4b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -81,7 +81,7 @@ getrandom = "0.2" paste = "1.0.9" reqwest = { version = "0.11.11", default-features = false, features = ["rustls-tls"] } skeptic = "0.13" -tokio = { version = "1.19", features = ["fs", "macros", "rt-multi-thread", "sync", "time" ] } +tokio = { version = "1.19", features = ["fs", "io-util", "macros", "rt-multi-thread", "sync", "time" ] } [target.'cfg(trybuild)'.dev-dependencies] trybuild = "1.0" @@ -137,3 +137,7 @@ required-features = ["sync"] [[example]] name = "size_aware_eviction_sync" required-features = ["sync"] + +[[example]] +name = "try_append_value_async" +required-features = ["future"] diff --git a/examples/append_value_async.rs b/examples/append_value_async.rs index 6b1f0314..4d48c649 100644 --- a/examples/append_value_async.rs +++ b/examples/append_value_async.rs @@ -1,10 +1,11 @@ +//! This example demonstrates how to append an `i32` value to a cached `Vec` +//! value. It uses the `and_upsert_with` method of `Cache`. + use std::sync::Arc; use moka::{future::Cache, Entry}; use tokio::sync::RwLock; -/// This example demonstrates how to append an `i32` value to a cached `Vec` -/// value. It uses the `and_upsert_with` method of `Cache`. #[tokio::main] async fn main() { // We want to store a raw value `Vec` for each `String` key. We are going to diff --git a/examples/bounded_counter_async.rs b/examples/bounded_counter_async.rs index cae64465..a914251f 100644 --- a/examples/bounded_counter_async.rs +++ b/examples/bounded_counter_async.rs @@ -1,53 +1,51 @@ +//! This example demonstrates how to increment a cached `u64` counter. It uses the +//! `and_upsert_with` method of `Cache`. + use moka::{ future::Cache, ops::compute::{self, PerformedOp}, Entry, }; -/// This example demonstrates how to increment a cached `u64` counter. It uses the -/// `and_upsert_with` method of `Cache`. #[tokio::main] async fn main() { let cache: Cache = Cache::new(100); let key = "key".to_string(); + // This should insert a now counter value 1 to the cache, and return the value + // with the kind of the operation performed. 
let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key).await; - assert_eq!(performed_op, PerformedOp::Inserted); - let entry = maybe_entry.expect("An entry should be returned"); - assert!(entry.is_fresh()); - assert!(!entry.is_updated()); assert_eq!(entry.into_value(), 1); + assert_eq!(performed_op, PerformedOp::Inserted); + // This should increment the cached counter value by 1. let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key).await; - assert_eq!(performed_op, PerformedOp::Updated); - let entry = maybe_entry.expect("An entry should be returned"); - assert!(entry.is_fresh()); - assert!(entry.is_updated()); assert_eq!(entry.into_value(), 2); + assert_eq!(performed_op, PerformedOp::Updated); + // This should remove the cached counter from the cache, and returns the + // _removed_ value. let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key).await; - assert_eq!(performed_op, PerformedOp::Removed); - let entry = maybe_entry.expect("An entry should be returned"); - assert!(!entry.is_fresh()); - assert!(!entry.is_updated()); assert_eq!(entry.into_value(), 2); + assert_eq!(performed_op, PerformedOp::Removed); + // The key should no longer exist. assert!(!cache.contains_key(&key)); + // This should start over; insert a new counter value 1 to the cache. let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key).await; - assert_eq!(performed_op, PerformedOp::Inserted); - let entry = maybe_entry.expect("An entry should be returned"); - assert!(entry.is_fresh()); - assert!(!entry.is_updated()); assert_eq!(entry.into_value(), 1); + assert_eq!(performed_op, PerformedOp::Inserted); } /// Increment a cached `u64` counter. If the counter is greater than or equal to 2, /// remove it. +/// +/// This method uses cache's `and_compute_with` method. async fn inclement_or_remove_counter( cache: &Cache, key: &str, diff --git a/examples/counter_async.rs b/examples/counter_async.rs index f62d33c1..929326c5 100644 --- a/examples/counter_async.rs +++ b/examples/counter_async.rs @@ -1,29 +1,30 @@ +//! This example demonstrates how to increment a cached `u64` counter. It uses the +//! `and_upsert_with` method of `Cache`. + use moka::{future::Cache, Entry}; -/// This example demonstrates how to increment a cached `u64` counter. It uses the -/// `and_upsert_with` method of `Cache`. #[tokio::main] async fn main() { let cache: Cache = Cache::new(100); let key = "key".to_string(); - let entry = inclement_counter(&cache, &key).await; + let entry = increment_counter(&cache, &key).await; assert!(entry.is_fresh()); assert!(!entry.is_updated()); assert_eq!(entry.into_value(), 1); - let entry = inclement_counter(&cache, &key).await; + let entry = increment_counter(&cache, &key).await; assert!(entry.is_fresh()); assert!(entry.is_updated()); assert_eq!(entry.into_value(), 2); - let entry = inclement_counter(&cache, &key).await; + let entry = increment_counter(&cache, &key).await; assert!(entry.is_fresh()); assert!(entry.is_updated()); assert_eq!(entry.into_value(), 3); } -async fn inclement_counter(cache: &Cache, key: &str) -> Entry { +async fn increment_counter(cache: &Cache, key: &str) -> Entry { cache .entry_by_ref(key) .and_upsert_with(|maybe_entry| { diff --git a/examples/try_append_value_async.rs b/examples/try_append_value_async.rs new file mode 100644 index 00000000..76ca007b --- /dev/null +++ b/examples/try_append_value_async.rs @@ -0,0 +1,109 @@ +//! This example demonstrates how to append an `i32` value to a cached `Vec` +//! value. 
It uses the `and_upsert_with` method of `Cache`. + +use std::{io::Cursor, pin::Pin, sync::Arc}; + +use moka::{ + future::Cache, + ops::compute::{self, PerformedOp}, + Entry, +}; +use tokio::{ + io::{AsyncRead, AsyncReadExt}, + sync::RwLock, +}; + +/// The type of the cache key. +type Key = i32; + +/// The type of the cache value. +/// +/// We want to store a raw value `String` for each `i32` key. We are going to append +/// `char` to the `String` value in the cache. +/// +/// Note that we have to wrap the `String` in an `Arc>`. We need the `Arc`, +/// an atomic reference counted shared pointer, because `and_try_compute_with` method +/// of `Cache` passes a _clone_ of the value to our closure, instead of passing a +/// `&mut` reference. We do not want to clone the `String` every time we append a +/// `char` to it, so we wrap it in an `Arc`. Then we need the `RwLock` because we +/// mutate the `String` when we append a value to it. +/// +/// The reason that `and_try_compute_with` cannot pass a `&mut String` to the closure +/// is because the internal concurrent hash table of `Cache` is a lock free data +/// structure and does not use any mutexes. So it cannot guarantee: (1) the +/// `&mut String` is unique, and (2) it is not accessed concurrently by other +/// threads. +type Value = Arc>; + +#[tokio::main] +async fn main() -> Result<(), tokio::io::Error> { + let cache: Cache = Cache::new(100); + + let key = 0; + + // We are going read a byte at a time from a byte string (`[u8; 3]`). + let reader = Cursor::new(b"abc"); + tokio::pin!(reader); + + // Read the first char 'a' from the reader, and insert a string "a" to the cache. + let (maybe_entry, performed_op) = append_to_cached_string(&cache, key, &mut reader).await?; + let entry = maybe_entry.expect("An entry should be returned"); + assert_eq!(*entry.into_value().read().await, "a"); + assert_eq!(performed_op, PerformedOp::Inserted); + + // Read next char 'b' from the reader, and append it the cached string. + let (maybe_entry, performed_op) = append_to_cached_string(&cache, key, &mut reader).await?; + let entry = maybe_entry.expect("An entry should be returned"); + assert_eq!(*entry.into_value().read().await, "ab"); + assert_eq!(performed_op, PerformedOp::Updated); + + // Read next char 'c' from the reader, and append it the cached string. + let (maybe_entry, performed_op) = append_to_cached_string(&cache, key, &mut reader).await?; + let entry = maybe_entry.expect("An entry should be returned"); + assert_eq!(*entry.into_value().read().await, "abc"); + assert_eq!(performed_op, PerformedOp::Updated); + + // Reading should fail as no more char left. + let err = append_to_cached_string(&cache, key, &mut reader).await; + assert_eq!( + err.expect_err("An error should be returned").kind(), + tokio::io::ErrorKind::UnexpectedEof + ); + + Ok(()) +} + +/// Reads a byte from the `reader``, convert it into a `char`, append it to the +/// cached `String` for the given `key`, and returns the resulting cached entry. +/// +/// If reading from the `reader` fails with an IO error, it returns the error. +/// +/// This method uses cache's `and_try_compute_with` method. +async fn append_to_cached_string( + cache: &Cache, + key: Key, + reader: &mut Pin<&mut impl AsyncRead>, +) -> Result<(Option>, PerformedOp), tokio::io::Error> { + cache + .entry(key) + .and_try_compute_with(|maybe_entry| async { + // Read a char from the reader. 
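+ // If this read fails, the `?` operator returns the IO error from the
+ // closure, and `and_try_compute_with` propagates it to the caller without
+ // modifying the cached entry.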
+ let byte = reader.read_u8().await?; + let char = + char::from_u32(byte as u32).expect("An ASCII byte should be converted into a char"); + + // Check if the entry already exists. + if let Some(entry) = maybe_entry { + // The entry exists, append the char to the Vec. + let v = entry.into_value(); + v.write().await.push(char); + Ok(compute::Op::Put(v)) + } else { + // The entry does not exist, insert a new Vec containing + // the char. + let v = RwLock::new(String::from(char)); + Ok(compute::Op::Put(Arc::new(v))) + } + }) + .await +} diff --git a/src/future/cache.rs b/src/future/cache.rs index 0fa4cb21..bbd42831 100644 --- a/src/future/cache.rs +++ b/src/future/cache.rs @@ -1906,6 +1906,49 @@ where } } + pub(crate) async fn try_compute_with_hash_and_fun( + &self, + key: Arc, + hash: u64, + f: F, + ) -> Result<(Option>, compute::PerformedOp), E> + where + F: FnOnce(Option>) -> Fut, + Fut: Future, E>>, + E: Send + Sync + 'static, + { + let type_id = ValueInitializer::::type_id_for_compute_with(); + let post_init = ValueInitializer::::post_init_for_try_compute_with; + + match self + .value_initializer + .try_compute(&key, hash, type_id, self, f, post_init) + .await + { + ComputeResult::Nop(maybe_value) => { + let maybe_entry = + maybe_value.map(|value| Entry::new(Some(key), value, false, false)); + Ok((maybe_entry, compute::PerformedOp::Nop)) + } + ComputeResult::Inserted(value) => { + crossbeam_epoch::pin().flush(); + let entry = Entry::new(Some(key), value, true, false); + Ok((Some(entry), compute::PerformedOp::Inserted)) + } + ComputeResult::Updated(value) => { + crossbeam_epoch::pin().flush(); + let entry = Entry::new(Some(key), value, true, true); + Ok((Some(entry), compute::PerformedOp::Updated)) + } + ComputeResult::Removed(value) => { + crossbeam_epoch::pin().flush(); + let entry = Entry::new(Some(key), value, false, false); + Ok((Some(entry), compute::PerformedOp::Removed)) + } + ComputeResult::EvalErr(e) => Err(e), + } + } + async fn invalidate_with_hash(&self, key: &Q, hash: u64, need_value: bool) -> Option where K: Borrow, diff --git a/src/future/entry_selector.rs b/src/future/entry_selector.rs index c1c1017e..e5a4fd38 100644 --- a/src/future/entry_selector.rs +++ b/src/future/entry_selector.rs @@ -373,6 +373,21 @@ where .compute_with_hash_and_fun(key, self.hash, f) .await } + + pub async fn and_try_compute_with( + self, + f: F, + ) -> Result<(Option>, compute::PerformedOp), E> + where + F: FnOnce(Option>) -> Fut, + Fut: Future, E>>, + E: Send + Sync + 'static, + { + let key = Arc::new(self.owned_key); + self.cache + .try_compute_with_hash_and_fun(key, self.hash, f) + .await + } } /// Provides advanced methods to select or insert an entry of the cache. @@ -744,4 +759,19 @@ where .compute_with_hash_and_fun(key, self.hash, f) .await } + + pub async fn and_try_compute_with( + self, + f: F, + ) -> Result<(Option>, compute::PerformedOp), E> + where + F: FnOnce(Option>) -> Fut, + Fut: Future, E>>, + E: Send + Sync + 'static, + { + let key = Arc::new(self.ref_key.to_owned()); + self.cache + .try_compute_with_hash_and_fun(key, self.hash, f) + .await + } } diff --git a/src/future/value_initializer.rs b/src/future/value_initializer.rs index 7141b4ad..8aee5970 100644 --- a/src/future/value_initializer.rs +++ b/src/future/value_initializer.rs @@ -427,6 +427,16 @@ where Ok(op) } + /// The `post_init` function for the `and_try_compute_with` method of cache. 
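+ /// This is effectively an identity function: the closure passed to
+ /// `and_try_compute_with` already returns a `Result<compute::Op<V>, E>`.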
+ pub(crate) fn post_init_for_try_compute_with( + op: Result, E>, + ) -> Result, E> + where + E: Send + Sync + 'static, + { + op + } + /// Returns the `type_id` for `get_with` method of cache. pub(crate) fn type_id_for_get_with() -> TypeId { // NOTE: We use a regular function here instead of a const fn because TypeId From cf6b1b6c9d06a97a52a1b22586086b6864234bda Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Wed, 3 Jan 2024 12:36:39 +0800 Subject: [PATCH 07/16] Compute API - Add unit tests for the compute family methods in `future::Cache` --- src/future/cache.rs | 357 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 357 insertions(+) diff --git a/src/future/cache.rs b/src/future/cache.rs index bbd42831..5f85809a 100644 --- a/src/future/cache.rs +++ b/src/future/cache.rs @@ -4363,6 +4363,363 @@ mod tests { futures_util::join!(task1, task2, task3, task4, task5, task6, task7, task8); } + #[tokio::test] + async fn upsert_with() { + let cache = Cache::new(100); + const KEY: u32 = 0; + + // Spawn three async tasks to call `and_upsert_with` for the same key and + // each task increments the current value by 1. Ensure the key-level lock is + // working by verifying the value is 3 after all tasks finish. + // + // | | task 1 | task 2 | task 3 | + // |--------|----------|----------|----------| + // | 0 ms | get none | | | + // | 100 ms | | blocked | | + // | 200 ms | insert 1 | | | + // | | | get 1 | | + // | 300 ms | | | blocked | + // | 400 ms | | insert 2 | | + // | | | | get 2 | + // | 500 ms | | | insert 3 | + + let task1 = { + let cache1 = cache.clone(); + async move { + cache1 + .entry(KEY) + .and_upsert_with(|maybe_entry| async move { + sleep(Duration::from_millis(200)).await; + assert!(maybe_entry.is_none()); + 1 + }) + .await + } + }; + + let task2 = { + let cache2 = cache.clone(); + async move { + sleep(Duration::from_millis(100)).await; + cache2 + .entry_by_ref(&KEY) + .and_upsert_with(|maybe_entry| async move { + sleep(Duration::from_millis(200)).await; + let entry = maybe_entry.expect("The entry should exist"); + entry.into_value() + 1 + }) + .await + } + }; + + let task3 = { + let cache3 = cache.clone(); + async move { + sleep(Duration::from_millis(300)).await; + cache3 + .entry_by_ref(&KEY) + .and_upsert_with(|maybe_entry| async move { + sleep(Duration::from_millis(100)).await; + let entry = maybe_entry.expect("The entry should exist"); + entry.into_value() + 1 + }) + .await + } + }; + + let (ent1, ent2, ent3) = futures_util::join!(task1, task2, task3); + assert_eq!(ent1.into_value(), 1); + assert_eq!(ent2.into_value(), 2); + assert_eq!(ent3.into_value(), 3); + + assert_eq!(cache.get(&KEY).await, Some(3)); + } + + #[tokio::test] + async fn compute_with() { + use crate::ops::compute; + use tokio::sync::RwLock; + + let cache = Cache::new(100); + const KEY: u32 = 0; + + // Spawn six async tasks to call `and_compute_with` for the same key. Ensure + // the key-level lock is working by verifying the value after all tasks + // finish. 
+ // + // | | task 1 | task 2 | task 3 | task 4 | task 5 | task 6 | + // |---------|------------|---------------|------------|----------|------------|---------| + // | 0 ms | get none | | | | | | + // | 100 ms | | blocked | | | | | + // | 200 ms | insert [1] | | | | | | + // | | | get [1] | | | | | + // | 300 ms | | | blocked | | | | + // | 400 ms | | insert [1, 2] | | | | | + // | | | | get [1, 2] | | | | + // | 500 ms | | | | blocked | | | + // | 600 ms | | | remove | | | | + // | | | | | get none | | | + // | 700 ms | | | | | blocked | | + // | 800 ms | | | | nop | | | + // | | | | | | get none | | + // | 900 ms | | | | | | blocked | + // | 1000 ms | | | | | insert [5] | | + // | | | | | | | get [5] | + // | 1100 ms | | | | | | nop | + + let task1 = { + let cache1 = cache.clone(); + async move { + cache1 + .entry(KEY) + .and_compute_with(|maybe_entry| async move { + sleep(Duration::from_millis(200)).await; + assert!(maybe_entry.is_none()); + compute::Op::Put(Arc::new(RwLock::new(vec![1]))) + }) + .await + } + }; + + let task2 = { + let cache2 = cache.clone(); + async move { + sleep(Duration::from_millis(100)).await; + cache2 + .entry_by_ref(&KEY) + .and_compute_with(|maybe_entry| async move { + let entry = maybe_entry.expect("The entry should exist"); + let value = entry.into_value(); + assert_eq!(*value.read().await, vec![1]); + sleep(Duration::from_millis(200)).await; + value.write().await.push(2); + compute::Op::Put(value) + }) + .await + } + }; + + let task3 = { + let cache3 = cache.clone(); + async move { + sleep(Duration::from_millis(300)).await; + cache3 + .entry(KEY) + .and_compute_with(|maybe_entry| async move { + let entry = maybe_entry.expect("The entry should exist"); + let value = entry.into_value(); + assert_eq!(*value.read().await, vec![1, 2]); + sleep(Duration::from_millis(200)).await; + compute::Op::Remove + }) + .await + } + }; + + let task4 = { + let cache4 = cache.clone(); + async move { + sleep(Duration::from_millis(500)).await; + cache4 + .entry(KEY) + .and_compute_with(|maybe_entry| async move { + assert!(maybe_entry.is_none()); + sleep(Duration::from_millis(200)).await; + compute::Op::Nop + }) + .await + } + }; + + let task5 = { + let cache5 = cache.clone(); + async move { + sleep(Duration::from_millis(700)).await; + cache5 + .entry_by_ref(&KEY) + .and_compute_with(|maybe_entry| async move { + assert!(maybe_entry.is_none()); + sleep(Duration::from_millis(200)).await; + compute::Op::Put(Arc::new(RwLock::new(vec![5]))) + }) + .await + } + }; + + let task6 = { + let cache6 = cache.clone(); + async move { + sleep(Duration::from_millis(900)).await; + cache6 + .entry_by_ref(&KEY) + .and_compute_with(|maybe_entry| async move { + let entry = maybe_entry.expect("The entry should exist"); + let value = entry.into_value(); + assert_eq!(*value.read().await, vec![5]); + sleep(Duration::from_millis(100)).await; + compute::Op::Nop + }) + .await + } + }; + + let ((ent1, op1), (ent2, op2), (ent3, op3), (ent4, op4), (ent5, op5), (v6, op6)) = + futures_util::join!(task1, task2, task3, task4, task5, task6); + assert_eq!(op1, compute::PerformedOp::Inserted); + assert_eq!(op2, compute::PerformedOp::Updated); + assert_eq!(op3, compute::PerformedOp::Removed); + assert_eq!(op4, compute::PerformedOp::Nop); + assert_eq!(op5, compute::PerformedOp::Inserted); + assert_eq!(op6, compute::PerformedOp::Nop); + + assert_eq!( + *ent1.expect("should have entry").into_value().read().await, + vec![1, 2] // The same Vec was modified by task2. 
+ ); + assert_eq!( + *ent2.expect("should have entry").into_value().read().await, + vec![1, 2] + ); + assert_eq!( + *ent3.expect("should have entry").into_value().read().await, + vec![1, 2] // Removed value + ); + assert!(ent4.is_none(),); + assert_eq!( + *ent5.expect("should have entry").into_value().read().await, + vec![5] + ); + assert_eq!( + *v6.expect("should have entry").into_value().read().await, + vec![5] + ); + } + + #[tokio::test] + async fn try_compute_with() { + use crate::ops::compute; + use tokio::sync::RwLock; + + let cache: Cache>>> = Cache::new(100); + const KEY: u32 = 0; + + // Spawn four async tasks to call `and_try_compute_with` for the same key. + // Ensure the key-level lock is working by verifying the value after all + // tasks finish. + // + // | | task 1 | task 2 | task 3 | task 4 | + // |---------|------------|---------------|------------|------------| + // | 0 ms | get none | | | | + // | 100 ms | | blocked | | | + // | 200 ms | insert [1] | | | | + // | | | get [1] | | | + // | 300 ms | | | blocked | | + // | 400 ms | | insert [1, 2] | | | + // | | | | get [1, 2] | | + // | 500 ms | | | | blocked | + // | 600 ms | | | err | | + // | | | | | get [1, 2] | + // | 700 ms | | | | remove | + // + // This test is shorter than `compute_with` test because this one omits `Nop` + // cases. + + let task1 = { + let cache1 = cache.clone(); + async move { + cache1 + .entry(KEY) + .and_try_compute_with(|maybe_entry| async move { + sleep(Duration::from_millis(200)).await; + assert!(maybe_entry.is_none()); + Ok(compute::Op::Put(Arc::new(RwLock::new(vec![1])))) as Result<_, ()> + }) + .await + } + }; + + let task2 = { + let cache2 = cache.clone(); + async move { + sleep(Duration::from_millis(100)).await; + cache2 + .entry_by_ref(&KEY) + .and_try_compute_with(|maybe_entry| async move { + let entry = maybe_entry.expect("The entry should exist"); + let value = entry.into_value(); + assert_eq!(*value.read().await, vec![1]); + sleep(Duration::from_millis(200)).await; + value.write().await.push(2); + Ok(compute::Op::Put(value)) as Result<_, ()> + }) + .await + } + }; + + let task3 = { + let cache3 = cache.clone(); + async move { + sleep(Duration::from_millis(300)).await; + cache3 + .entry(KEY) + .and_try_compute_with(|maybe_entry| async move { + let entry = maybe_entry.expect("The entry should exist"); + let value = entry.into_value(); + assert_eq!(*value.read().await, vec![1, 2]); + sleep(Duration::from_millis(200)).await; + Err(()) + }) + .await + } + }; + + let task4 = { + let cache4 = cache.clone(); + async move { + sleep(Duration::from_millis(500)).await; + cache4 + .entry(KEY) + .and_try_compute_with(|maybe_entry| async move { + let entry = maybe_entry.expect("The entry should exist"); + let value = entry.into_value(); + assert_eq!(*value.read().await, vec![1, 2]); + sleep(Duration::from_millis(100)).await; + Ok(compute::Op::Remove) as Result<_, ()> + }) + .await + } + }; + + let (res1, res2, res3, res4) = futures_util::join!(task1, task2, task3, task4); + let Ok((ent1, op1)) = res1 else { + panic!("res1 should be an Ok") + }; + let Ok((ent2, op2)) = res2 else { + panic!("res2 should be an Ok") + }; + assert!(res3.is_err()); + let Ok((ent4, op4)) = res4 else { + panic!("res4 should be an Ok") + }; + + assert_eq!(op1, compute::PerformedOp::Inserted); + assert_eq!(op2, compute::PerformedOp::Updated); + assert_eq!(op4, compute::PerformedOp::Removed); + + assert_eq!( + *ent1.expect("should have entry").into_value().read().await, + vec![1, 2] // The same Vec was modified by task2. 
+ ); + assert_eq!( + *ent2.expect("should have entry").into_value().read().await, + vec![1, 2] + ); + assert_eq!( + *ent4.expect("should have entry").into_value().read().await, + vec![1, 2] // Removed value. + ); + } + #[tokio::test] // https://github.com/moka-rs/moka/issues/43 async fn handle_panic_in_get_with() { From 6427668eb8fb75b0362e8955dc503a14eb7de70c Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Wed, 3 Jan 2024 18:04:57 +0800 Subject: [PATCH 08/16] Compute API - Add some docs on the entry `and_try_compute_with` method of `future::Cache` --- .vscode/settings.json | 1 + examples/bounded_counter_async.rs | 2 +- src/future/entry_selector.rs | 411 +++++++++++++++++++++++++----- src/ops.rs | 26 +- 4 files changed, 360 insertions(+), 80 deletions(-) diff --git a/.vscode/settings.json b/.vscode/settings.json index 9d1719f1..22ae28b7 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -64,6 +64,7 @@ "Uninit", "unsync", "Upsert", + "upserted", "usize" ], "files.watcherExclude": { diff --git a/examples/bounded_counter_async.rs b/examples/bounded_counter_async.rs index a914251f..c173bd46 100644 --- a/examples/bounded_counter_async.rs +++ b/examples/bounded_counter_async.rs @@ -1,5 +1,5 @@ //! This example demonstrates how to increment a cached `u64` counter. It uses the -//! `and_upsert_with` method of `Cache`. +//! `and_compute_with` method of `Cache`. use moka::{ future::Cache, diff --git a/src/future/entry_selector.rs b/src/future/entry_selector.rs index e5a4fd38..f30ebdb1 100644 --- a/src/future/entry_selector.rs +++ b/src/future/entry_selector.rs @@ -39,6 +39,312 @@ where } } + /// This method performs a compute operation on a key by using the given closure + /// `f`. A compute operation is either put, remove or nop (no-operation). + /// + /// The closure `f` should take the current entry of `Option>` for + /// the key, and return a `Future` that resolves to an `ops::compute::Op` + /// enum. + /// + /// This method works as the followings: + /// + /// 1. Apply the closure `f` to the current cached `Entry`, and get a `Future`. + /// 2. Resolve the `Future`, and get an `ops::compute::Op`. + /// 3. Execute the op on the cache: + /// - `Op::Put(V)`: Put the new value `V` to the cache. + /// - `Op::Remove`: Remove the current cached entry. + /// - `Op::Nop`: Do nothing. + /// 4. Return a `(Entry, ops::compute::PerformedOp)` as the followings: + /// + /// | [`Op`] | `Entry` to return | [`PerformedOp`] | + /// |:--------- |:------------------- |:----------------------- | + /// | `Put(V)` | The current entry | `Inserted` or `Updated` | + /// | `Remove` | The _removed_ entry | `Removed` | + /// | `Nop` | The current entry | `Nop` | + /// + /// **Notes:** + /// + /// - `Op::Put(V)`: `PerformedOp::Updated` is returned when the key already + /// existed in the cache. It is _not_ related to whether the value was actually + /// updated or not. + /// - `Op::Remove`: Unlike other ops, the _removed_ entry is returned. If you mix + /// `Remove` with other ops, ensure to check whether the performed op is + /// `Removed` or not. + /// + /// # Similar Methods + /// + /// - If you want the `Future` resolve to `Result>` instead of `Op`, and + /// upsert only when resolved to `Ok(V)`, use the [`and_try_compute_with`] + /// method. + /// - If you only want to put, use the [`and_upsert_with`] method. 
+ /// + /// [`Entry`]: ../struct.Entry.html + /// [`Op`]: ../ops/compute/enum.Op.html + /// [`PerformedOp`]: ../ops/compute/enum.PerformedOp.html + /// [`and_upsert_with`]: #method.and_upsert_with + /// [`and_try_compute_with`]: #method.and_try_compute_with + /// + /// # Example + /// + /// ```rust + /// // Cargo.toml + /// // + /// // [dependencies] + /// // moka = { version = "0.12", features = ["future"] } + /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } + /// + /// use moka::{ + /// future::Cache, + /// ops::compute::{self, PerformedOp}, + /// Entry, + /// }; + /// + /// #[tokio::main] + /// async fn main() { + /// let cache: Cache = Cache::new(100); + /// let key = "key1".to_string(); + /// + /// /// Increment a cached `u64` counter. If the counter is greater than or + /// /// equal to 2, remove it. + /// async fn inclement_or_remove_counter( + /// cache: &Cache, + /// key: &str, + /// ) -> (Option>, compute::PerformedOp) { + /// cache + /// .entry(key.to_string()) + /// .and_compute_with(|maybe_entry| { + /// let op = if let Some(entry) = maybe_entry { + /// let counter = entry.into_value(); + /// if counter < 2 { + /// compute::Op::Put(counter.saturating_add(1)) // Update + /// } else { + /// compute::Op::Remove // Remove + /// } + /// } else { + /// compute::Op::Put(1) // Insert + /// }; + /// // Return a Future that is resolved to `op` immediately. + /// std::future::ready(op) + /// }) + /// .await + /// } + /// + /// // This should insert a now counter value 1 to the cache, and return the + /// // value with the kind of the operation performed. + /// let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key).await; + /// let entry = maybe_entry.expect("An entry should be returned"); + /// assert_eq!(entry.into_value(), 1); + /// assert_eq!(performed_op, PerformedOp::Inserted); + /// + /// // This should increment the cached counter value by 1. + /// let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key).await; + /// let entry = maybe_entry.expect("An entry should be returned"); + /// assert_eq!(entry.into_value(), 2); + /// assert_eq!(performed_op, PerformedOp::Updated); + /// + /// // This should remove the cached counter from the cache, and returns the + /// // _removed_ value. + /// let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key).await; + /// let entry = maybe_entry.expect("An entry should be returned"); + /// assert_eq!(entry.into_value(), 2); + /// assert_eq!(performed_op, PerformedOp::Removed); + /// + /// // The key should no longer exist. + /// assert!(!cache.contains_key(&key)); + /// + /// // This should start over; insert a new counter value 1 to the cache. + /// let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key).await; + /// let entry = maybe_entry.expect("An entry should be returned"); + /// assert_eq!(entry.into_value(), 1); + /// assert_eq!(performed_op, PerformedOp::Inserted); + /// } + /// ``` + /// + /// # Concurrent calls on the same key + /// + /// This method guarantees that concurrent calls on the same key are executed + /// serially. That is, `and_compute_with` calls on the same key never run + /// concurrently. The calls are serialized by the order of their invocation. It + /// uses a key-level lock to achieve this. 
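+    ///
+    /// A rough sketch of relying on this guarantee (assuming a Tokio runtime with
+    /// the `macros` feature enabled): the two calls below are made concurrently
+    /// but never overlap, so neither increment is lost.
+    ///
+    /// ```rust
+    /// // Cargo.toml
+    /// //
+    /// // [dependencies]
+    /// // moka = { version = "0.12", features = ["future"] }
+    /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
+    ///
+    /// use moka::{future::Cache, ops::compute};
+    ///
+    /// #[tokio::main]
+    /// async fn main() {
+    ///     let cache: Cache<String, u64> = Cache::new(100);
+    ///     let key = "key1".to_string();
+    ///
+    ///     // Each call reads the current value (if any) and puts that value + 1.
+    ///     let fut1 = cache.entry(key.clone()).and_compute_with(|maybe_entry| async move {
+    ///         compute::Op::Put(maybe_entry.map(|e| e.into_value()).unwrap_or(0) + 1)
+    ///     });
+    ///     let fut2 = cache.entry(key.clone()).and_compute_with(|maybe_entry| async move {
+    ///         compute::Op::Put(maybe_entry.map(|e| e.into_value()).unwrap_or(0) + 1)
+    ///     });
+    ///
+    ///     // The key-level lock serializes the two computations.
+    ///     tokio::join!(fut1, fut2);
+    ///     assert_eq!(cache.get(&key).await, Some(2));
+    /// }
+    /// ```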
+ pub async fn and_compute_with(self, f: F) -> (Option>, compute::PerformedOp) + where + F: FnOnce(Option>) -> Fut, + Fut: Future>, + { + let key = Arc::new(self.owned_key); + self.cache + .compute_with_hash_and_fun(key, self.hash, f) + .await + } + + /// This method performs a compute operation on a key by using the given closure + /// `f`. A compute operation is either put, remove or nop (no-operation). + /// + /// The closure `f` should take the current entry of `Option>` for + /// the key, and return a `Future` that resolves to an + /// `Result, E>`. + /// + /// This method works as the followings: + /// + /// 1. Apply the closure `f` to the current cached `Entry`, and get a `Future`. + /// 2. Resolve the `Future`, and get a `Result, E>`. + /// 3. If resolved to `Err(E)`, return it. + /// 4. Else, execute the op on the cache: + /// - `Ok(Op::Put(V))`: Put the new value `V` to the cache. + /// - `Ok(Op::Remove)`: Remove the current cached entry. + /// - `Ok(Op::Nop)`: Do nothing. + /// 5. Return a `Ok((Entry, ops::compute::PerformedOp))` as the followings: + /// + /// | [`Op`] | `Entry` to return | [`PerformedOp`] | + /// |:--------- |:------------------- |:----------------------- | + /// | `Put(V)` | The current entry | `Inserted` or `Updated` | + /// | `Remove` | The _removed_ entry | `Removed` | + /// | `Nop` | The current entry | `Nop` | + /// + /// **Notes:** + /// + /// - `Op::Put(V)`: `PerformedOp::Updated` is returned when the key already + /// existed in the cache. It is _not_ related to whether the value was actually + /// updated or not. + /// - `Op::Remove`: Unlike other ops, the _removed_ entry is returned. If you mix + /// `Remove` with other ops, ensure to check whether the performed op is + /// `Removed` or not. + /// + /// # Similar Methods + /// + /// - If you want the `Future` resolve to `Op` instead of `Result>`, use + /// the [`and_compute_with`] method. + /// - If you only want to put, use the [`and_upsert_with`] method. + /// + /// [`Entry`]: ../struct.Entry.html + /// [`Op`]: ../ops/compute/enum.Op.html + /// [`PerformedOp`]: ../ops/compute/enum.PerformedOp.html + /// [`and_upsert_with`]: #method.and_upsert_with + /// [`and_compute_with`]: #method.and_compute_with + /// + /// # Example + /// + /// See [`try_append_value_async.rs`] in the `examples` directory. + /// + /// [`try_append_value_async.rs`]: + /// https://github.com/moka-rs/moka/tree/main/examples/try_append_value_async.rs + /// + /// # Concurrent calls on the same key + /// + /// This method guarantees that concurrent calls on the same key are executed + /// serially. That is, `and_try_compute_with` calls on the same key never run + /// concurrently. The calls are serialized by the order of their invocation. It + /// uses a key-level lock to achieve this. + pub async fn and_try_compute_with( + self, + f: F, + ) -> Result<(Option>, compute::PerformedOp), E> + where + F: FnOnce(Option>) -> Fut, + Fut: Future, E>>, + E: Send + Sync + 'static, + { + let key = Arc::new(self.owned_key); + self.cache + .try_compute_with_hash_and_fun(key, self.hash, f) + .await + } + + /// This method performs an upsert of an [`Entry`] by using the given closure + /// `f`. The word "upsert" here means "update" or "insert". + /// + /// The closure `f` should take the current entry of `Option>` for + /// the key, and return a `Future` that resolves to a new value `V`. + /// + /// This method works as the followings: + /// + /// 1. Apply the closure `f` to the current cached `Entry`, and get a `Future`. + /// 2. 
Resolve the `Future`, and get a new value `V`. + /// 3. Upsert the new value to the cache. + /// 4. Return the `Entry` having the upserted value. + /// + /// # Similar Methods + /// + /// - If you want to optionally upsert, that is to upsert only when certain + /// conditions meet, use the [`and_compute_with`] method. + /// - If you try to upsert, that is to make the `Future` resolve to `Result` + /// instead of `V`, and upsert only when resolved to `Ok(V)`, use the + /// [`and_try_compute_with`] method. + /// + /// [`Entry`]: ../struct.Entry.html + /// [`and_compute_with`]: #method.and_compute_with + /// [`and_try_compute_with`]: #method.and_try_compute_with + /// + /// # Example + /// + /// ```rust + /// // Cargo.toml + /// // + /// // [dependencies] + /// // moka = { version = "0.12", features = ["future"] } + /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } + /// + /// use moka::future::Cache; + /// + /// #[tokio::main] + /// async fn main() { + /// let cache: Cache = Cache::new(100); + /// let key = "key1".to_string(); + /// + /// let entry = cache + /// .entry(key.clone()) + /// .and_upsert_with(|maybe_entry| { + /// let counter = if let Some(entry) = maybe_entry { + /// entry.into_value().saturating_add(1) // Update + /// } else { + /// 1 // Insert + /// }; + /// // Return a Future that is resolved to `counter` immediately. + /// std::future::ready(counter) + /// }) + /// .await; + /// // It was not an update. + /// assert!(!entry.is_updated()); + /// assert_eq!(entry.key(), &key); + /// assert_eq!(entry.into_value(), 1); + /// + /// let entry = cache + /// .entry(key.clone()) + /// .and_upsert_with(|maybe_entry| { + /// let counter = if let Some(entry) = maybe_entry { + /// entry.into_value().saturating_add(1) + /// } else { + /// 1 + /// }; + /// std::future::ready(counter) + /// }) + /// .await; + /// // It was an update. + /// assert!(entry.is_updated()); + /// assert_eq!(entry.key(), &key); + /// assert_eq!(entry.into_value(), 2); + /// } + /// ``` + /// + /// Note: The `is_updated` method of the `Entry` returns `true` when the key + /// already existed in the cache. It is not related to whether the value was + /// actually updated or not. + /// + /// # Concurrent calls on the same key + /// + /// This method guarantees that concurrent calls on the same key are executed + /// serially. That is, `and_upsert_with` calls on the same key never run + /// concurrently. The calls are serialized by the order of their invocation. It + /// uses a key-level lock to achieve this. + pub async fn and_upsert_with(self, f: F) -> Entry + where + F: FnOnce(Option>) -> Fut, + Fut: Future, + { + let key = Arc::new(self.owned_key); + self.cache.upsert_with_hash_and_fun(key, self.hash, f).await + } + /// Returns the corresponding [`Entry`] for the key given when this entry /// selector was constructed. If the entry does not exist, inserts one by calling /// the [`default`][std-default-function] function of the value type `V`. 
@@ -353,41 +659,6 @@ where .get_or_try_insert_with_hash_and_fun(key, self.hash, init, true) .await } - - pub async fn and_upsert_with(self, f: F) -> Entry - where - F: FnOnce(Option>) -> Fut, - Fut: Future, - { - let key = Arc::new(self.owned_key); - self.cache.upsert_with_hash_and_fun(key, self.hash, f).await - } - - pub async fn and_compute_with(self, f: F) -> (Option>, compute::PerformedOp) - where - F: FnOnce(Option>) -> Fut, - Fut: Future>, - { - let key = Arc::new(self.owned_key); - self.cache - .compute_with_hash_and_fun(key, self.hash, f) - .await - } - - pub async fn and_try_compute_with( - self, - f: F, - ) -> Result<(Option>, compute::PerformedOp), E> - where - F: FnOnce(Option>) -> Fut, - Fut: Future, E>>, - E: Send + Sync + 'static, - { - let key = Arc::new(self.owned_key); - self.cache - .try_compute_with_hash_and_fun(key, self.hash, f) - .await - } } /// Provides advanced methods to select or insert an entry of the cache. @@ -424,6 +695,41 @@ where } } + pub async fn and_compute_with(self, f: F) -> (Option>, compute::PerformedOp) + where + F: FnOnce(Option>) -> Fut, + Fut: Future>, + { + let key = Arc::new(self.ref_key.to_owned()); + self.cache + .compute_with_hash_and_fun(key, self.hash, f) + .await + } + + pub async fn and_try_compute_with( + self, + f: F, + ) -> Result<(Option>, compute::PerformedOp), E> + where + F: FnOnce(Option>) -> Fut, + Fut: Future, E>>, + E: Send + Sync + 'static, + { + let key = Arc::new(self.ref_key.to_owned()); + self.cache + .try_compute_with_hash_and_fun(key, self.hash, f) + .await + } + + pub async fn and_upsert_with(self, f: F) -> Entry + where + F: FnOnce(Option>) -> Fut, + Fut: Future, + { + let key = Arc::new(self.ref_key.to_owned()); + self.cache.upsert_with_hash_and_fun(key, self.hash, f).await + } + /// Returns the corresponding [`Entry`] for the reference of the key given when /// this entry selector was constructed. If the entry does not exist, inserts one /// by cloning the key and calling the [`default`][std-default-function] function @@ -739,39 +1045,4 @@ where .get_or_try_insert_with_hash_by_ref_and_fun(self.ref_key, self.hash, init, true) .await } - - pub async fn and_upsert_with(self, f: F) -> Entry - where - F: FnOnce(Option>) -> Fut, - Fut: Future, - { - let key = Arc::new(self.ref_key.to_owned()); - self.cache.upsert_with_hash_and_fun(key, self.hash, f).await - } - - pub async fn and_compute_with(self, f: F) -> (Option>, compute::PerformedOp) - where - F: FnOnce(Option>) -> Fut, - Fut: Future>, - { - let key = Arc::new(self.ref_key.to_owned()); - self.cache - .compute_with_hash_and_fun(key, self.hash, f) - .await - } - - pub async fn and_try_compute_with( - self, - f: F, - ) -> Result<(Option>, compute::PerformedOp), E> - where - F: FnOnce(Option>) -> Fut, - Fut: Future, E>>, - E: Send + Sync + 'static, - { - let key = Arc::new(self.ref_key.to_owned()); - self.cache - .try_compute_with_hash_and_fun(key, self.hash, f) - .await - } } diff --git a/src/ops.rs b/src/ops.rs index 7481ac31..a1f967d2 100644 --- a/src/ops.rs +++ b/src/ops.rs @@ -1,30 +1,38 @@ +//! Cache operations. + +/// Operations used by the `and_compute_with` and similar methods. pub mod compute { - /// Instructs the `and_compute` method how to modify the cache entry. + /// Instructs the `and_compute_with` and similar methods how to modify the cached + /// entry. #[derive(Debug, Clone, PartialEq, Eq)] pub enum Op { - /// No-op. Do not modify the cached entry. + /// No-operation. Do not modify the cached entry. 
Nop, - /// Insert or replace the value of the cached entry. + /// Insert or update the value of the cached entry. Put(V), /// Remove the cached entry. Remove, } - /// Will be returned from `and_compute_with` and similar methods to indicate what - /// kind of operation was performed. + /// Will be returned by the `and_compute_with` and similar methods to indicate + /// what kind of operation was performed. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum PerformedOp { /// The entry did not exist, or already existed but was not modified. Nop, /// The entry did not exist and was inserted. Inserted, - /// The entry already existed and its value was updated. + /// The entry already existed and its value may have been updated. + /// + /// Note: `Updated` is returned if `Op::Put` was requested and the entry + /// already existed. It is _not_ related to whether the value was actually + /// updated or not. Updated, - /// The entry existed and was removed. + /// The entry already existed and was removed. /// - /// Note: If `and_compute_with` tried to remove a not-exiting entry, `Nop` - /// will be returned. + /// Note: `Nop` is returned instead of `Removed` if `Op::Remove` was + /// requested but the entry did not exist. Removed, } } From 075e37f1e15de1a1d08d97ade5435082ecad389f Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Thu, 4 Jan 2024 18:47:43 +0800 Subject: [PATCH 09/16] Compute API - Update the docs of the entry `and_try_compute_with` method of `future::Cache` --- src/future/entry_selector.rs | 323 ++++++++++++++++++++++++++++++++--- 1 file changed, 298 insertions(+), 25 deletions(-) diff --git a/src/future/entry_selector.rs b/src/future/entry_selector.rs index f30ebdb1..3de20ccc 100644 --- a/src/future/entry_selector.rs +++ b/src/future/entry_selector.rs @@ -39,8 +39,8 @@ where } } - /// This method performs a compute operation on a key by using the given closure - /// `f`. A compute operation is either put, remove or nop (no-operation). + /// Performs a compute operation on a cached entry by using the given closure + /// `f`. A compute operation is either put, remove or no-operation (nop). /// /// The closure `f` should take the current entry of `Option>` for /// the key, and return a `Future` that resolves to an `ops::compute::Op` @@ -54,19 +54,19 @@ where /// - `Op::Put(V)`: Put the new value `V` to the cache. /// - `Op::Remove`: Remove the current cached entry. /// - `Op::Nop`: Do nothing. - /// 4. Return a `(Entry, ops::compute::PerformedOp)` as the followings: + /// 4. Return an `(Option, ops::compute::PerformedOp)` as the followings: /// - /// | [`Op`] | `Entry` to return | [`PerformedOp`] | - /// |:--------- |:------------------- |:----------------------- | - /// | `Put(V)` | The current entry | `Inserted` or `Updated` | - /// | `Remove` | The _removed_ entry | `Removed` | - /// | `Nop` | The current entry | `Nop` | + /// | [`Op`] | `Entry` to return | [`PerformedOp`] | + /// |:--------- |:--------------------------- |:----------------------- | + /// | `Put(V)` | The inserted/updated entry | `Inserted` or `Updated` | + /// | `Remove` | The _removed_ entry | `Removed` | + /// | `Nop` | The current entry or `None` | `Nop` | /// /// **Notes:** /// /// - `Op::Put(V)`: `PerformedOp::Updated` is returned when the key already /// existed in the cache. It is _not_ related to whether the value was actually - /// updated or not. + /// updated or not. It can be replaced with the same value. /// - `Op::Remove`: Unlike other ops, the _removed_ entry is returned. 
If you mix /// `Remove` with other ops, ensure to check whether the performed op is /// `Removed` or not. @@ -177,11 +177,11 @@ where .await } - /// This method performs a compute operation on a key by using the given closure - /// `f`. A compute operation is either put, remove or nop (no-operation). + /// Performs a compute operation on a cached entry by using the given closure + /// `f`. A compute operation is either put, remove or no-operation (nop). /// /// The closure `f` should take the current entry of `Option>` for - /// the key, and return a `Future` that resolves to an + /// the key, and return a `Future` that resolves to a /// `Result, E>`. /// /// This method works as the followings: @@ -193,21 +193,22 @@ where /// - `Ok(Op::Put(V))`: Put the new value `V` to the cache. /// - `Ok(Op::Remove)`: Remove the current cached entry. /// - `Ok(Op::Nop)`: Do nothing. - /// 5. Return a `Ok((Entry, ops::compute::PerformedOp))` as the followings: + /// 5. Return a `Ok((Option, ops::compute::PerformedOp))` as the + /// followings: /// - /// | [`Op`] | `Entry` to return | [`PerformedOp`] | - /// |:--------- |:------------------- |:----------------------- | - /// | `Put(V)` | The current entry | `Inserted` or `Updated` | - /// | `Remove` | The _removed_ entry | `Removed` | - /// | `Nop` | The current entry | `Nop` | + /// | [`Op`] | `Entry` to return | [`PerformedOp`] | + /// |:--------- |:--------------------------- |:----------------------- | + /// | `Put(V)` | The inserted/updated entry | `Inserted` or `Updated` | + /// | `Remove` | The _removed_ entry | `Removed` | + /// | `Nop` | The current entry or `None` | `Nop` | /// /// **Notes:** /// - /// - `Op::Put(V)`: `PerformedOp::Updated` is returned when the key already + /// - `Ok(Op::Put(V))`: `PerformedOp::Updated` is returned when the key already /// existed in the cache. It is _not_ related to whether the value was actually - /// updated or not. - /// - `Op::Remove`: Unlike other ops, the _removed_ entry is returned. If you mix - /// `Remove` with other ops, ensure to check whether the performed op is + /// updated or not. It can be replaced with the same value. + /// - `Ok(Op::Remove)`: Unlike other ops, the _removed_ entry is returned. If you + /// mix `Remove` with other ops, ensure to check whether the performed op is /// `Removed` or not. /// /// # Similar Methods @@ -250,8 +251,8 @@ where .await } - /// This method performs an upsert of an [`Entry`] by using the given closure - /// `f`. The word "upsert" here means "update" or "insert". + /// Performs an upsert of an [`Entry`] by using the given closure `f`. The word + /// "upsert" here means "update" or "insert". /// /// The closure `f` should take the current entry of `Option>` for /// the key, and return a `Future` that resolves to a new value `V`. @@ -328,7 +329,7 @@ where /// /// Note: The `is_updated` method of the `Entry` returns `true` when the key /// already existed in the cache. It is not related to whether the value was - /// actually updated or not. + /// actually updated or not. It can be replaced with the same value. /// /// # Concurrent calls on the same key /// @@ -695,6 +696,133 @@ where } } + /// Performs a compute operation on a cached entry by using the given closure + /// `f`. A compute operation is either put, remove or no-operation (nop). + /// + /// The closure `f` should take the current entry of `Option>` for + /// the key, and return a `Future` that resolves to an `ops::compute::Op` + /// enum. 
+ /// + /// This method works as the followings: + /// + /// 1. Apply the closure `f` to the current cached `Entry`, and get a `Future`. + /// 2. Resolve the `Future`, and get an `ops::compute::Op`. + /// 3. Execute the op on the cache: + /// - `Op::Put(V)`: Put the new value `V` to the cache. + /// - `Op::Remove`: Remove the current cached entry. + /// - `Op::Nop`: Do nothing. + /// 4. Return an `(Option, ops::compute::PerformedOp)` as the followings: + /// + /// | [`Op`] | `Entry` to return | [`PerformedOp`] | + /// |:--------- |:--------------------------- |:----------------------- | + /// | `Put(V)` | The inserted/updated entry | `Inserted` or `Updated` | + /// | `Remove` | The _removed_ entry | `Removed` | + /// | `Nop` | The current entry or `None` | `Nop` | + /// + /// **Notes:** + /// + /// - `Op::Put(V)`: `PerformedOp::Updated` is returned when the key already + /// existed in the cache. It is _not_ related to whether the value was actually + /// updated or not. It can be replaced with the same value. + /// - `Op::Remove`: Unlike other ops, the _removed_ entry is returned. If you mix + /// `Remove` with other ops, ensure to check whether the performed op is + /// `Removed` or not. + /// + /// # Similar Methods + /// + /// - If you want the `Future` resolve to `Result>` instead of `Op`, and + /// upsert only when resolved to `Ok(V)`, use the [`and_try_compute_with`] + /// method. + /// - If you only want to put, use the [`and_upsert_with`] method. + /// + /// [`Entry`]: ../struct.Entry.html + /// [`Op`]: ../ops/compute/enum.Op.html + /// [`PerformedOp`]: ../ops/compute/enum.PerformedOp.html + /// [`and_upsert_with`]: #method.and_upsert_with + /// [`and_try_compute_with`]: #method.and_try_compute_with + /// + /// # Example + /// + /// ```rust + /// // Cargo.toml + /// // + /// // [dependencies] + /// // moka = { version = "0.12", features = ["future"] } + /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } + /// + /// use moka::{ + /// future::Cache, + /// ops::compute::{self, PerformedOp}, + /// Entry, + /// }; + /// + /// #[tokio::main] + /// async fn main() { + /// let cache: Cache = Cache::new(100); + /// let key = "key1"; + /// + /// /// Increment a cached `u64` counter. If the counter is greater than or + /// /// equal to 2, remove it. + /// async fn inclement_or_remove_counter( + /// cache: &Cache, + /// key: &str, + /// ) -> (Option>, compute::PerformedOp) { + /// cache + /// .entry_by_ref(key) + /// .and_compute_with(|maybe_entry| { + /// let op = if let Some(entry) = maybe_entry { + /// let counter = entry.into_value(); + /// if counter < 2 { + /// compute::Op::Put(counter.saturating_add(1)) // Update + /// } else { + /// compute::Op::Remove // Remove + /// } + /// } else { + /// compute::Op::Put(1) // Insert + /// }; + /// // Return a Future that is resolved to `op` immediately. + /// std::future::ready(op) + /// }) + /// .await + /// } + /// + /// // This should insert a now counter value 1 to the cache, and return the + /// // value with the kind of the operation performed. + /// let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key).await; + /// let entry = maybe_entry.expect("An entry should be returned"); + /// assert_eq!(entry.into_value(), 1); + /// assert_eq!(performed_op, PerformedOp::Inserted); + /// + /// // This should increment the cached counter value by 1. 
+ /// let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key).await; + /// let entry = maybe_entry.expect("An entry should be returned"); + /// assert_eq!(entry.into_value(), 2); + /// assert_eq!(performed_op, PerformedOp::Updated); + /// + /// // This should remove the cached counter from the cache, and returns the + /// // _removed_ value. + /// let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key).await; + /// let entry = maybe_entry.expect("An entry should be returned"); + /// assert_eq!(entry.into_value(), 2); + /// assert_eq!(performed_op, PerformedOp::Removed); + /// + /// // The key should no longer exist. + /// assert!(!cache.contains_key(key)); + /// + /// // This should start over; insert a new counter value 1 to the cache. + /// let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key).await; + /// let entry = maybe_entry.expect("An entry should be returned"); + /// assert_eq!(entry.into_value(), 1); + /// assert_eq!(performed_op, PerformedOp::Inserted); + /// } + /// ``` + /// + /// # Concurrent calls on the same key + /// + /// This method guarantees that concurrent calls on the same key are executed + /// serially. That is, `and_compute_with` calls on the same key never run + /// concurrently. The calls are serialized by the order of their invocation. It + /// uses a key-level lock to achieve this. pub async fn and_compute_with(self, f: F) -> (Option>, compute::PerformedOp) where F: FnOnce(Option>) -> Fut, @@ -706,6 +834,65 @@ where .await } + /// Performs a compute operation on a cached entry by using the given closure + /// `f`. A compute operation is either put, remove or no-operation (nop). + /// + /// The closure `f` should take the current entry of `Option>` for + /// the key, and return a `Future` that resolves to a + /// `Result, E>`. + /// + /// This method works as the followings: + /// + /// 1. Apply the closure `f` to the current cached `Entry`, and get a `Future`. + /// 2. Resolve the `Future`, and get a `Result, E>`. + /// 3. If resolved to `Err(E)`, return it. + /// 4. Else, execute the op on the cache: + /// - `Ok(Op::Put(V))`: Put the new value `V` to the cache. + /// - `Ok(Op::Remove)`: Remove the current cached entry. + /// - `Ok(Op::Nop)`: Do nothing. + /// 5. Return a `Ok((Option, ops::compute::PerformedOp))` as the + /// followings: + /// + /// | [`Op`] | `Entry` to return | [`PerformedOp`] | + /// |:--------- |:--------------------------- |:----------------------- | + /// | `Put(V)` | The inserted/updated entry | `Inserted` or `Updated` | + /// | `Remove` | The _removed_ entry | `Removed` | + /// | `Nop` | The current entry or `None` | `Nop` | + /// + /// **Notes:** + /// + /// - `Ok(Op::Put(V))`: `PerformedOp::Updated` is returned when the key already + /// existed in the cache. It is _not_ related to whether the value was actually + /// updated or not. It can be replaced with the same value. + /// - `Ok(Op::Remove)`: Unlike other ops, the _removed_ entry is returned. If you + /// mix `Remove` with other ops, ensure to check whether the performed op is + /// `Removed` or not. + /// + /// # Similar Methods + /// + /// - If you want the `Future` resolve to `Op` instead of `Result>`, use + /// the [`and_compute_with`] method. + /// - If you only want to put, use the [`and_upsert_with`] method. 
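+    ///
+    /// # Error Handling
+    ///
+    /// When the closure resolves to `Err(_)`, the error is returned as is and the
+    /// cached entry is left unchanged. A minimal sketch of this path (assuming a
+    /// Tokio runtime); a full, runnable example is linked in the Example section
+    /// below:
+    ///
+    /// ```rust
+    /// // Cargo.toml
+    /// //
+    /// // [dependencies]
+    /// // moka = { version = "0.12", features = ["future"] }
+    /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
+    ///
+    /// use moka::{future::Cache, ops::compute};
+    ///
+    /// #[tokio::main]
+    /// async fn main() {
+    ///     let cache: Cache<String, u64> = Cache::new(100);
+    ///     cache.insert("key1".to_string(), 40).await;
+    ///
+    ///     let result = cache
+    ///         .entry_by_ref("key1")
+    ///         .and_try_compute_with(|maybe_entry| async move {
+    ///             let counter = maybe_entry.expect("The entry should exist").into_value();
+    ///             if counter >= 10 {
+    ///                 // Refuse to update and report an error instead.
+    ///                 Err("counter is too large")
+    ///             } else {
+    ///                 Ok(compute::Op::Put(counter + 1))
+    ///             }
+    ///         })
+    ///         .await;
+    ///
+    ///     assert!(result.is_err());
+    ///     // The cached value was not modified.
+    ///     assert_eq!(cache.get("key1").await, Some(40));
+    /// }
+    /// ```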
+ /// + /// [`Entry`]: ../struct.Entry.html + /// [`Op`]: ../ops/compute/enum.Op.html + /// [`PerformedOp`]: ../ops/compute/enum.PerformedOp.html + /// [`and_upsert_with`]: #method.and_upsert_with + /// [`and_compute_with`]: #method.and_compute_with + /// + /// # Example + /// + /// See [`try_append_value_async.rs`] in the `examples` directory. + /// + /// [`try_append_value_async.rs`]: + /// https://github.com/moka-rs/moka/tree/main/examples/try_append_value_async.rs + /// + /// # Concurrent calls on the same key + /// + /// This method guarantees that concurrent calls on the same key are executed + /// serially. That is, `and_try_compute_with` calls on the same key never run + /// concurrently. The calls are serialized by the order of their invocation. It + /// uses a key-level lock to achieve this. pub async fn and_try_compute_with( self, f: F, @@ -721,6 +908,92 @@ where .await } + /// Performs an upsert of an [`Entry`] by using the given closure `f`. The word + /// "upsert" here means "update" or "insert". + /// + /// The closure `f` should take the current entry of `Option>` for + /// the key, and return a `Future` that resolves to a new value `V`. + /// + /// This method works as the followings: + /// + /// 1. Apply the closure `f` to the current cached `Entry`, and get a `Future`. + /// 2. Resolve the `Future`, and get a new value `V`. + /// 3. Upsert the new value to the cache. + /// 4. Return the `Entry` having the upserted value. + /// + /// # Similar Methods + /// + /// - If you want to optionally upsert, that is to upsert only when certain + /// conditions meet, use the [`and_compute_with`] method. + /// - If you try to upsert, that is to make the `Future` resolve to `Result` + /// instead of `V`, and upsert only when resolved to `Ok(V)`, use the + /// [`and_try_compute_with`] method. + /// + /// [`Entry`]: ../struct.Entry.html + /// [`and_compute_with`]: #method.and_compute_with + /// [`and_try_compute_with`]: #method.and_try_compute_with + /// + /// # Example + /// + /// ```rust + /// // Cargo.toml + /// // + /// // [dependencies] + /// // moka = { version = "0.12", features = ["future"] } + /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } + /// + /// use moka::future::Cache; + /// + /// #[tokio::main] + /// async fn main() { + /// let cache: Cache = Cache::new(100); + /// let key = "key1"; + /// + /// let entry = cache + /// .entry_by_ref(key) + /// .and_upsert_with(|maybe_entry| { + /// let counter = if let Some(entry) = maybe_entry { + /// entry.into_value().saturating_add(1) // Update + /// } else { + /// 1 // Insert + /// }; + /// // Return a Future that is resolved to `counter` immediately. + /// std::future::ready(counter) + /// }) + /// .await; + /// // It was not an update. + /// assert!(!entry.is_updated()); + /// assert_eq!(entry.key(), &key); + /// assert_eq!(entry.into_value(), 1); + /// + /// let entry = cache + /// .entry_by_ref(key) + /// .and_upsert_with(|maybe_entry| { + /// let counter = if let Some(entry) = maybe_entry { + /// entry.into_value().saturating_add(1) + /// } else { + /// 1 + /// }; + /// std::future::ready(counter) + /// }) + /// .await; + /// // It was an update. + /// assert!(entry.is_updated()); + /// assert_eq!(entry.key(), &key); + /// assert_eq!(entry.into_value(), 2); + /// } + /// ``` + /// + /// Note: The `is_updated` method of the `Entry` returns `true` when the key + /// already existed in the cache. It is not related to whether the value was + /// actually updated or not. 
It can be replaced with the same value. + /// + /// # Concurrent calls on the same key + /// + /// This method guarantees that concurrent calls on the same key are executed + /// serially. That is, `and_upsert_with` calls on the same key never run + /// concurrently. The calls are serialized by the order of their invocation. It + /// uses a key-level lock to achieve this. pub async fn and_upsert_with(self, f: F) -> Entry where F: FnOnce(Option>) -> Fut, From b992147f387e44f589f5594f2e31ff0c8263912d Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Thu, 4 Jan 2024 18:48:12 +0800 Subject: [PATCH 10/16] Update the roadmap --- README.md | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 12798b80..9d635dca 100644 --- a/README.md +++ b/README.md @@ -517,21 +517,20 @@ $ cargo +nightly -Z unstable-options --config 'build.rustdocflags="--cfg docsrs" ## Road Map - [x] Size-aware eviction. (`v0.7.0` via [#24][gh-pull-024]) -- [x] API stabilization. (Smaller core cache API, shorter names for frequently - used methods) (`v0.8.0` via [#105][gh-pull-105]) +- [x] API stabilization. (Smaller core API, shorter names for frequently used + methods) (`v0.8.0` via [#105][gh-pull-105]) - e.g. - `get_or_insert_with(K, F)` → `get_with(K, F)` - `get_or_try_insert_with(K, F)` → `try_get_with(K, F)` - - `blocking_insert(K, V)` → `blocking().insert(K, V)` - `time_to_live()` → `policy().time_to_live()` - [x] Notifications on eviction. (`v0.9.0` via [#145][gh-pull-145]) - [x] Variable (per-entry) expiration, using hierarchical timer wheels. (`v0.11.0` via [#248][gh-pull-248]) -- [ ] Cache statistics (Hit rate, etc.). ([details][cache-stats]) - [x] Remove background threads. (`v0.12.0` via [#294][gh-pull-294] and [#316][gh-pull-316]) +- [x] Add upsert and compute methods. (`v0.12.3` via [#370][gh-pull-370]) +- [ ] Cache statistics (Hit rate, etc.). ([details][cache-stats]) - [ ] Restore cache from a snapshot. ([details][restore]) -- [ ] `and_compute` method. ([details][and-compute]) - [ ] Upgrade TinyLFU to Window-TinyLFU. ([details][tiny-lfu]) [gh-pull-024]: https://github.com/moka-rs/moka/pull/24 @@ -540,8 +539,8 @@ $ cargo +nightly -Z unstable-options --config 'build.rustdocflags="--cfg docsrs" [gh-pull-248]: https://github.com/moka-rs/moka/pull/248 [gh-pull-294]: https://github.com/moka-rs/moka/pull/294 [gh-pull-316]: https://github.com/moka-rs/moka/pull/316 +[gh-pull-370]: https://github.com/moka-rs/moka/pull/370 -[and-compute]: https://github.com/moka-rs/moka/issues/227 [cache-stats]: https://github.com/moka-rs/moka/issues/234 [restore]: https://github.com/moka-rs/moka/issues/314 From e5b95c1d9d9500348eb3e6047e487b2298f1ecbd Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Thu, 4 Jan 2024 22:52:06 +0800 Subject: [PATCH 11/16] Compute API - Add entry `and_try_compute_with` and other methods to `sync::Cache` --- README.md | 2 +- src/future.rs | 4 +- src/future/cache.rs | 73 +++-- src/future/value_initializer.rs | 16 +- src/sync.rs | 6 +- src/sync/cache.rs | 498 +++++++++++++++++++++++++++++++- src/sync/entry_selector.rs | 58 +++- src/sync/value_initializer.rs | 200 ++++++++++++- 8 files changed, 788 insertions(+), 69 deletions(-) diff --git a/README.md b/README.md index 9d635dca..1502097e 100644 --- a/README.md +++ b/README.md @@ -514,7 +514,7 @@ $ cargo +nightly -Z unstable-options --config 'build.rustdocflags="--cfg docsrs" doc --no-deps --features 'future, sync' ``` -## Road Map +## Roadmap - [x] Size-aware eviction. 
(`v0.7.0` via [#24][gh-pull-024]) - [x] API stabilization. (Smaller core API, shorter names for frequently used diff --git a/src/future.rs b/src/future.rs index 483b5785..654b3eb7 100644 --- a/src/future.rs +++ b/src/future.rs @@ -35,10 +35,10 @@ pub type PredicateId = String; pub(crate) type PredicateIdStr<'a> = &'a str; -// Empty struct to be used in InitResult::InitErr to represent the Option None. +// Empty struct to be used in `InitResult::InitErr` to represent the Option None. pub(crate) struct OptionallyNone; -// Empty struct to be used in InitResult::InitErr to represent the Compute None. +// Empty struct to be used in `InitResult::InitErr` to represent the Compute None. pub(crate) struct ComputeNone; impl FutureExt for T where T: Future {} diff --git a/src/future/cache.rs b/src/future/cache.rs index 5f85809a..edea7ee0 100644 --- a/src/future/cache.rs +++ b/src/future/cache.rs @@ -1830,38 +1830,6 @@ where cancel_guard.clear(); } - pub(crate) async fn upsert_with_hash_and_fun( - &self, - key: Arc, - hash: u64, - f: F, - ) -> Entry - where - F: FnOnce(Option>) -> Fut, - Fut: Future, - { - let type_id = ValueInitializer::::type_id_for_compute_with(); - let post_init = ValueInitializer::::post_init_for_upsert_with; - - match self - .value_initializer - .try_compute(&key, hash, type_id, self, f, post_init) - .await - { - ComputeResult::Inserted(value) => { - crossbeam_epoch::pin().flush(); - Entry::new(Some(key), value, true, false) - } - ComputeResult::Updated(value) => { - crossbeam_epoch::pin().flush(); - Entry::new(Some(key), value, true, true) - } - ComputeResult::Nop(_) | ComputeResult::Removed(_) | ComputeResult::EvalErr(_) => { - unreachable!() - } - } - } - pub(crate) async fn compute_with_hash_and_fun( &self, key: Arc, @@ -1872,12 +1840,11 @@ where F: FnOnce(Option>) -> Fut, Fut: Future>, { - let type_id = ValueInitializer::::type_id_for_compute_with(); let post_init = ValueInitializer::::post_init_for_compute_with; match self .value_initializer - .try_compute(&key, hash, type_id, self, f, post_init) + .try_compute(&key, hash, self, f, post_init, true) .await { ComputeResult::Nop(maybe_value) => { @@ -1917,12 +1884,11 @@ where Fut: Future, E>>, E: Send + Sync + 'static, { - let type_id = ValueInitializer::::type_id_for_compute_with(); let post_init = ValueInitializer::::post_init_for_try_compute_with; match self .value_initializer - .try_compute(&key, hash, type_id, self, f, post_init) + .try_compute(&key, hash, self, f, post_init, true) .await { ComputeResult::Nop(maybe_value) => { @@ -1949,6 +1915,37 @@ where } } + pub(crate) async fn upsert_with_hash_and_fun( + &self, + key: Arc, + hash: u64, + f: F, + ) -> Entry + where + F: FnOnce(Option>) -> Fut, + Fut: Future, + { + let post_init = ValueInitializer::::post_init_for_upsert_with; + + match self + .value_initializer + .try_compute(&key, hash, self, f, post_init, false) + .await + { + ComputeResult::Inserted(value) => { + crossbeam_epoch::pin().flush(); + Entry::new(Some(key), value, true, false) + } + ComputeResult::Updated(value) => { + crossbeam_epoch::pin().flush(); + Entry::new(Some(key), value, true, true) + } + ComputeResult::Nop(_) | ComputeResult::Removed(_) | ComputeResult::EvalErr(_) => { + unreachable!() + } + } + } + async fn invalidate_with_hash(&self, key: &Q, hash: u64, need_value: bool) -> Option where K: Borrow, @@ -4563,7 +4560,7 @@ mod tests { } }; - let ((ent1, op1), (ent2, op2), (ent3, op3), (ent4, op4), (ent5, op5), (v6, op6)) = + let ((ent1, op1), (ent2, op2), (ent3, op3), (ent4, op4), (ent5, op5), 
(ent6, op6)) = futures_util::join!(task1, task2, task3, task4, task5, task6); assert_eq!(op1, compute::PerformedOp::Inserted); assert_eq!(op2, compute::PerformedOp::Updated); @@ -4590,7 +4587,7 @@ mod tests { vec![5] ); assert_eq!( - *v6.expect("should have entry").into_value().read().await, + *ent6.expect("should have entry").into_value().read().await, vec![5] ); } diff --git a/src/future/value_initializer.rs b/src/future/value_initializer.rs index 8aee5970..aa49c438 100644 --- a/src/future/value_initializer.rs +++ b/src/future/value_initializer.rs @@ -290,10 +290,10 @@ where &'a self, c_key: &Arc, c_hash: u64, - type_id: TypeId, cache: &C, // Future to initialize a new value. f: F, post_init: fn(O) -> Result, E>, + allow_nop: bool, ) -> ComputeResult where C: GetOrInsert + Send + 'a, @@ -304,6 +304,8 @@ where use std::panic::{resume_unwind, AssertUnwindSafe}; use ComputeResult::{EvalErr, Inserted, Nop, Removed, Updated}; + let type_id = TypeId::of::(); + let (w_key, w_hash) = waiter_key_hash(&self.waiters, c_key, type_id); let waiter = TrioArc::new(RwLock::new(WaiterValue::Computing)); @@ -342,8 +344,11 @@ where // Get the current value. let maybe_entry = cache.get_entry_without_recording(c_key, c_hash).await; - // TODO: Avoid cloning if possible. - let maybe_value = maybe_entry.as_ref().map(|ent| ent.value().clone()); + let maybe_value = if allow_nop { + maybe_entry.as_ref().map(|ent| ent.value().clone()) + } else { + None + }; let entry_existed = maybe_entry.is_some(); // Let's evaluate the `f` closure and get a future. Catching panic is safe @@ -453,11 +458,6 @@ where pub(crate) fn type_id_for_try_get_with() -> TypeId { TypeId::of::() } - - /// Returns the `type_id` for `and_compute_with` method of cache. - pub(crate) fn type_id_for_compute_with() -> TypeId { - TypeId::of::() - } } #[inline] diff --git a/src/sync.rs b/src/sync.rs index 67529062..ec3e3988 100644 --- a/src/sync.rs +++ b/src/sync.rs @@ -20,6 +20,8 @@ pub trait ConcurrentCacheExt { fn sync(&self); } -// Empty internal struct to be used in optionally_get_with to represent the None -// results. +// Empty struct to be used in `InitResult::InitErr` to represent the Option None. pub(crate) struct OptionallyNone; + +// Empty struct to be used in `InitResult::InitErr`` to represent the Compute None. 
+pub(crate) struct ComputeNone; diff --git a/src/sync/cache.rs b/src/sync/cache.rs index 54986a38..618c7afb 100644 --- a/src/sync/cache.rs +++ b/src/sync/cache.rs @@ -1,5 +1,5 @@ use super::{ - value_initializer::{InitResult, ValueInitializer}, + value_initializer::{ComputeResult, GetOrInsert, InitResult, ValueInitializer}, CacheBuilder, OwnedKeyEntrySelector, RefKeyEntrySelector, }; use crate::{ @@ -10,6 +10,7 @@ use crate::{ time::Instant, }, notification::EvictionListener, + ops::compute, policy::ExpirationPolicy, sync::{Iter, PredicateId}, sync_base::{ @@ -1501,6 +1502,111 @@ where .expect("Failed to insert"); } + pub(crate) fn compute_with_hash_and_fun( + &self, + key: Arc, + hash: u64, + f: F, + ) -> (Option>, compute::PerformedOp) + where + F: FnOnce(Option>) -> compute::Op, + { + let post_init = ValueInitializer::::post_init_for_compute_with; + + match self + .value_initializer + .try_compute(&key, hash, self, f, post_init, true) + { + ComputeResult::Nop(maybe_value) => { + let maybe_entry = + maybe_value.map(|value| Entry::new(Some(key), value, false, false)); + (maybe_entry, compute::PerformedOp::Nop) + } + ComputeResult::Inserted(value) => { + crossbeam_epoch::pin().flush(); + let entry = Entry::new(Some(key), value, true, false); + (Some(entry), compute::PerformedOp::Inserted) + } + ComputeResult::Updated(value) => { + crossbeam_epoch::pin().flush(); + let entry = Entry::new(Some(key), value, true, true); + (Some(entry), compute::PerformedOp::Updated) + } + ComputeResult::Removed(value) => { + crossbeam_epoch::pin().flush(); + let entry = Entry::new(Some(key), value, false, false); + (Some(entry), compute::PerformedOp::Removed) + } + ComputeResult::EvalErr(_) => { + unreachable!() + } + } + } + + pub(crate) fn try_compute_with_hash_and_fun( + &self, + key: Arc, + hash: u64, + f: F, + ) -> Result<(Option>, compute::PerformedOp), E> + where + F: FnOnce(Option>) -> Result, E>, + E: Send + Sync + 'static, + { + let post_init = ValueInitializer::::post_init_for_try_compute_with; + + match self + .value_initializer + .try_compute(&key, hash, self, f, post_init, true) + { + ComputeResult::Nop(maybe_value) => { + let maybe_entry = + maybe_value.map(|value| Entry::new(Some(key), value, false, false)); + Ok((maybe_entry, compute::PerformedOp::Nop)) + } + ComputeResult::Inserted(value) => { + crossbeam_epoch::pin().flush(); + let entry = Entry::new(Some(key), value, true, false); + Ok((Some(entry), compute::PerformedOp::Inserted)) + } + ComputeResult::Updated(value) => { + crossbeam_epoch::pin().flush(); + let entry = Entry::new(Some(key), value, true, true); + Ok((Some(entry), compute::PerformedOp::Updated)) + } + ComputeResult::Removed(value) => { + crossbeam_epoch::pin().flush(); + let entry = Entry::new(Some(key), value, false, false); + Ok((Some(entry), compute::PerformedOp::Removed)) + } + ComputeResult::EvalErr(e) => Err(e), + } + } + + pub(crate) fn upsert_with_hash_and_fun(&self, key: Arc, hash: u64, f: F) -> Entry + where + F: FnOnce(Option>) -> V, + { + let post_init = ValueInitializer::::post_init_for_upsert_with; + + match self + .value_initializer + .try_compute(&key, hash, self, f, post_init, false) + { + ComputeResult::Inserted(value) => { + crossbeam_epoch::pin().flush(); + Entry::new(Some(key), value, true, false) + } + ComputeResult::Updated(value) => { + crossbeam_epoch::pin().flush(); + Entry::new(Some(key), value, true, true) + } + ComputeResult::Nop(_) | ComputeResult::Removed(_) | ComputeResult::EvalErr(_) => { + unreachable!() + } + } + } + /// Discards any cached 
value for the key. /// /// If you need to get a the value that has been discarded, use the @@ -1792,6 +1898,27 @@ where } } +impl GetOrInsert for Cache +where + K: Hash + Eq + Send + Sync + 'static, + V: Clone + Send + Sync + 'static, + S: BuildHasher + Clone + Send + Sync + 'static, +{ + fn get_entry_without_recording(&self, key: &Arc, hash: u64) -> Option> { + let ignore_if = None as Option<&mut fn(&V) -> bool>; + self.base + .get_with_hash_and_ignore_if(key, hash, ignore_if, true) + } + + fn insert(&self, key: Arc, hash: u64, value: V) { + self.insert_with_hash(key.clone(), hash, value); + } + + fn remove(&self, key: &Arc, hash: u64) -> Option { + self.invalidate_with_hash(key, hash, true) + } +} + // For unit tests. #[cfg(test)] impl Cache @@ -3896,6 +4023,375 @@ mod tests { } } + #[test] + fn upsert_with() { + use std::thread::{sleep, spawn}; + + let cache = Cache::new(100); + const KEY: u32 = 0; + + // Spawn three threads to call `and_upsert_with` for the same key and each + // task increments the current value by 1. Ensure the key-level lock is + // working by verifying the value is 3 after all tasks finish. + // + // | | thread 1 | thread 2 | thread 3 | + // |--------|----------|----------|----------| + // | 0 ms | get none | | | + // | 100 ms | | blocked | | + // | 200 ms | insert 1 | | | + // | | | get 1 | | + // | 300 ms | | | blocked | + // | 400 ms | | insert 2 | | + // | | | | get 2 | + // | 500 ms | | | insert 3 | + + let thread1 = { + let cache1 = cache.clone(); + spawn(move || { + cache1.entry(KEY).and_upsert_with(|maybe_entry| { + sleep(Duration::from_millis(200)); + assert!(maybe_entry.is_none()); + 1 + }) + }) + }; + + let thread2 = { + let cache2 = cache.clone(); + spawn(move || { + sleep(Duration::from_millis(100)); + cache2.entry_by_ref(&KEY).and_upsert_with(|maybe_entry| { + sleep(Duration::from_millis(200)); + let entry = maybe_entry.expect("The entry should exist"); + entry.into_value() + 1 + }) + }) + }; + + let thread3 = { + let cache3 = cache.clone(); + spawn(move || { + sleep(Duration::from_millis(300)); + cache3.entry_by_ref(&KEY).and_upsert_with(|maybe_entry| { + sleep(Duration::from_millis(100)); + let entry = maybe_entry.expect("The entry should exist"); + entry.into_value() + 1 + }) + }) + }; + + let ent1 = thread1.join().expect("Thread 1 should finish"); + let ent2 = thread2.join().expect("Thread 2 should finish"); + let ent3 = thread3.join().expect("Thread 3 should finish"); + assert_eq!(ent1.into_value(), 1); + assert_eq!(ent2.into_value(), 2); + assert_eq!(ent3.into_value(), 3); + + assert_eq!(cache.get(&KEY), Some(3)); + } + + #[test] + fn compute_with() { + use crate::ops::compute; + use std::{ + sync::RwLock, + thread::{sleep, spawn}, + }; + + let cache = Cache::new(100); + const KEY: u32 = 0; + + // Spawn six threads to call `and_compute_with` for the same key. Ensure the + // key-level lock is working by verifying the value after all tasks finish. 
+ // + // | | thread 1 | thread 2 | thread 3 | thread 4 | thread 5 | thread 6 | + // |---------|------------|---------------|------------|----------|------------|----------| + // | 0 ms | get none | | | | | | + // | 100 ms | | blocked | | | | | + // | 200 ms | insert [1] | | | | | | + // | | | get [1] | | | | | + // | 300 ms | | | blocked | | | | + // | 400 ms | | insert [1, 2] | | | | | + // | | | | get [1, 2] | | | | + // | 500 ms | | | | blocked | | | + // | 600 ms | | | remove | | | | + // | | | | | get none | | | + // | 700 ms | | | | | blocked | | + // | 800 ms | | | | nop | | | + // | | | | | | get none | | + // | 900 ms | | | | | | blocked | + // | 1000 ms | | | | | insert [5] | | + // | | | | | | | get [5] | + // | 1100 ms | | | | | | nop | + + let thread1 = { + let cache1 = cache.clone(); + spawn(move || { + cache1.entry(KEY).and_compute_with(|maybe_entry| { + sleep(Duration::from_millis(200)); + assert!(maybe_entry.is_none()); + compute::Op::Put(Arc::new(RwLock::new(vec![1]))) + }) + }) + }; + + let thread2 = { + let cache2 = cache.clone(); + spawn(move || { + sleep(Duration::from_millis(100)); + cache2.entry_by_ref(&KEY).and_compute_with(|maybe_entry| { + let entry = maybe_entry.expect("The entry should exist"); + let value = entry.into_value(); + assert_eq!(*value.read().unwrap(), vec![1]); + sleep(Duration::from_millis(200)); + value.write().unwrap().push(2); + compute::Op::Put(value) + }) + }) + }; + + let thread3 = { + let cache3 = cache.clone(); + spawn(move || { + sleep(Duration::from_millis(300)); + cache3.entry(KEY).and_compute_with(|maybe_entry| { + let entry = maybe_entry.expect("The entry should exist"); + let value = entry.into_value(); + assert_eq!(*value.read().unwrap(), vec![1, 2]); + sleep(Duration::from_millis(200)); + compute::Op::Remove + }) + }) + }; + + let thread4 = { + let cache4 = cache.clone(); + spawn(move || { + sleep(Duration::from_millis(500)); + cache4.entry(KEY).and_compute_with(|maybe_entry| { + assert!(maybe_entry.is_none()); + sleep(Duration::from_millis(200)); + compute::Op::Nop + }) + }) + }; + + let thread5 = { + let cache5 = cache.clone(); + spawn(move || { + sleep(Duration::from_millis(700)); + cache5.entry_by_ref(&KEY).and_compute_with(|maybe_entry| { + assert!(maybe_entry.is_none()); + sleep(Duration::from_millis(200)); + compute::Op::Put(Arc::new(RwLock::new(vec![5]))) + }) + }) + }; + + let thread6 = { + let cache6 = cache.clone(); + spawn(move || { + sleep(Duration::from_millis(900)); + cache6.entry_by_ref(&KEY).and_compute_with(|maybe_entry| { + let entry = maybe_entry.expect("The entry should exist"); + let value = entry.into_value(); + assert_eq!(*value.read().unwrap(), vec![5]); + sleep(Duration::from_millis(100)); + compute::Op::Nop + }) + }) + }; + + let (ent1, op1) = thread1.join().expect("Thread 1 should finish"); + let (ent2, op2) = thread2.join().expect("Thread 2 should finish"); + let (ent3, op3) = thread3.join().expect("Thread 3 should finish"); + let (ent4, op4) = thread4.join().expect("Thread 4 should finish"); + let (ent5, op5) = thread5.join().expect("Thread 5 should finish"); + let (ent6, op6) = thread6.join().expect("Thread 6 should finish"); + assert_eq!(op1, compute::PerformedOp::Inserted); + assert_eq!(op2, compute::PerformedOp::Updated); + assert_eq!(op3, compute::PerformedOp::Removed); + assert_eq!(op4, compute::PerformedOp::Nop); + assert_eq!(op5, compute::PerformedOp::Inserted); + assert_eq!(op6, compute::PerformedOp::Nop); + + assert_eq!( + *ent1 + .expect("should have entry") + .into_value() + .read() + 
.unwrap(), + vec![1, 2] // The same Vec was modified by task2. + ); + assert_eq!( + *ent2 + .expect("should have entry") + .into_value() + .read() + .unwrap(), + vec![1, 2] + ); + assert_eq!( + *ent3 + .expect("should have entry") + .into_value() + .read() + .unwrap(), + vec![1, 2] // Removed value + ); + assert!(ent4.is_none(),); + assert_eq!( + *ent5 + .expect("should have entry") + .into_value() + .read() + .unwrap(), + vec![5] + ); + assert_eq!( + *ent6 + .expect("should have entry") + .into_value() + .read() + .unwrap(), + vec![5] + ); + } + + #[test] + fn try_compute_with() { + use crate::ops::compute; + use std::{ + sync::RwLock, + thread::{sleep, spawn}, + }; + + let cache: Cache>>> = Cache::new(100); + const KEY: u32 = 0; + + // Spawn four threads to call `and_try_compute_with` for the same key. Ensure + // the key-level lock is working by verifying the value after all tasks + // finish. + // + // | | thread 1 | thread 2 | thread 3 | thread 4 | + // |---------|------------|---------------|------------|------------| + // | 0 ms | get none | | | | + // | 100 ms | | blocked | | | + // | 200 ms | insert [1] | | | | + // | | | get [1] | | | + // | 300 ms | | | blocked | | + // | 400 ms | | insert [1, 2] | | | + // | | | | get [1, 2] | | + // | 500 ms | | | | blocked | + // | 600 ms | | | err | | + // | | | | | get [1, 2] | + // | 700 ms | | | | remove | + // + // This test is shorter than `compute_with` test because this one omits `Nop` + // cases. + + let thread1 = { + let cache1 = cache.clone(); + spawn(move || { + cache1.entry(KEY).and_try_compute_with(|maybe_entry| { + sleep(Duration::from_millis(200)); + assert!(maybe_entry.is_none()); + Ok(compute::Op::Put(Arc::new(RwLock::new(vec![1])))) as Result<_, ()> + }) + }) + }; + + let thread2 = { + let cache2 = cache.clone(); + spawn(move || { + sleep(Duration::from_millis(100)); + cache2 + .entry_by_ref(&KEY) + .and_try_compute_with(|maybe_entry| { + let entry = maybe_entry.expect("The entry should exist"); + let value = entry.into_value(); + assert_eq!(*value.read().unwrap(), vec![1]); + sleep(Duration::from_millis(200)); + value.write().unwrap().push(2); + Ok(compute::Op::Put(value)) as Result<_, ()> + }) + }) + }; + + let thread3 = { + let cache3 = cache.clone(); + spawn(move || { + sleep(Duration::from_millis(300)); + cache3.entry(KEY).and_try_compute_with(|maybe_entry| { + let entry = maybe_entry.expect("The entry should exist"); + let value = entry.into_value(); + assert_eq!(*value.read().unwrap(), vec![1, 2]); + sleep(Duration::from_millis(200)); + Err(()) + }) + }) + }; + + let thread4 = { + let cache4 = cache.clone(); + spawn(move || { + sleep(Duration::from_millis(500)); + cache4.entry(KEY).and_try_compute_with(|maybe_entry| { + let entry = maybe_entry.expect("The entry should exist"); + let value = entry.into_value(); + assert_eq!(*value.read().unwrap(), vec![1, 2]); + sleep(Duration::from_millis(100)); + Ok(compute::Op::Remove) as Result<_, ()> + }) + }) + }; + + let res1 = thread1.join().expect("Thread 1 should finish"); + let res2 = thread2.join().expect("Thread 2 should finish"); + let res3 = thread3.join().expect("Thread 3 should finish"); + let res4 = thread4.join().expect("Thread 4 should finish"); + + let Ok((ent1, op1)) = res1 else { + panic!("res1 should be an Ok") + }; + let Ok((ent2, op2)) = res2 else { + panic!("res2 should be an Ok") + }; + assert!(res3.is_err()); + let Ok((ent4, op4)) = res4 else { + panic!("res4 should be an Ok") + }; + + assert_eq!(op1, compute::PerformedOp::Inserted); + assert_eq!(op2, 
compute::PerformedOp::Updated); + assert_eq!(op4, compute::PerformedOp::Removed); + + assert_eq!( + *ent1 + .expect("should have entry") + .into_value() + .read() + .unwrap(), + vec![1, 2] // The same Vec was modified by task2. + ); + assert_eq!( + *ent2 + .expect("should have entry") + .into_value() + .read() + .unwrap(), + vec![1, 2] + ); + assert_eq!( + *ent4 + .expect("should have entry") + .into_value() + .read() + .unwrap(), + vec![1, 2] // Removed value. + ); + } + #[test] // https://github.com/moka-rs/moka/issues/43 fn handle_panic_in_get_with() { diff --git a/src/sync/entry_selector.rs b/src/sync/entry_selector.rs index 11d65363..c6385fc3 100644 --- a/src/sync/entry_selector.rs +++ b/src/sync/entry_selector.rs @@ -1,4 +1,4 @@ -use crate::Entry; +use crate::{ops::compute, Entry}; use super::Cache; @@ -38,6 +38,34 @@ where } } + pub fn and_compute_with(self, f: F) -> (Option>, compute::PerformedOp) + where + F: FnOnce(Option>) -> compute::Op, + { + let key = Arc::new(self.owned_key); + self.cache.compute_with_hash_and_fun(key, self.hash, f) + } + + pub fn and_try_compute_with( + self, + f: F, + ) -> Result<(Option>, compute::PerformedOp), E> + where + F: FnOnce(Option>) -> Result, E>, + E: Send + Sync + 'static, + { + let key = Arc::new(self.owned_key); + self.cache.try_compute_with_hash_and_fun(key, self.hash, f) + } + + pub fn and_upsert_with(self, f: F) -> Entry + where + F: FnOnce(Option>) -> V, + { + let key = Arc::new(self.owned_key); + self.cache.upsert_with_hash_and_fun(key, self.hash, f) + } + /// Returns the corresponding [`Entry`] for the key given when this entry /// selector was constructed. If the entry does not exist, inserts one by calling /// the [`default`][std-default-function] function of the value type `V`. @@ -325,6 +353,34 @@ where } } + pub fn and_compute_with(self, f: F) -> (Option>, compute::PerformedOp) + where + F: FnOnce(Option>) -> compute::Op, + { + let key = Arc::new(self.ref_key.to_owned()); + self.cache.compute_with_hash_and_fun(key, self.hash, f) + } + + pub fn and_try_compute_with( + self, + f: F, + ) -> Result<(Option>, compute::PerformedOp), E> + where + F: FnOnce(Option>) -> Result, E>, + E: Send + Sync + 'static, + { + let key = Arc::new(self.ref_key.to_owned()); + self.cache.try_compute_with_hash_and_fun(key, self.hash, f) + } + + pub fn and_upsert_with(self, f: F) -> Entry + where + F: FnOnce(Option>) -> V, + { + let key = Arc::new(self.ref_key.to_owned()); + self.cache.upsert_with_hash_and_fun(key, self.hash, f) + } + /// Returns the corresponding [`Entry`] for the reference of the key given when /// this entry selector was constructed. 
If the entry does not exist, inserts one /// by cloning the key and calling the [`default`][std-default-function] function diff --git a/src/sync/value_initializer.rs b/src/sync/value_initializer.rs index 8bbdc045..88061132 100644 --- a/src/sync/value_initializer.rs +++ b/src/sync/value_initializer.rs @@ -1,17 +1,50 @@ use parking_lot::RwLock; use std::{ any::{Any, TypeId}, + fmt, hash::{BuildHasher, Hash}, sync::Arc, }; use triomphe::Arc as TrioArc; -use super::OptionallyNone; +use crate::{ops::compute, Entry}; + +use super::{ComputeNone, OptionallyNone}; const WAITER_MAP_NUM_SEGMENTS: usize = 64; +pub(crate) trait GetOrInsert { + fn get_entry_without_recording(&self, key: &Arc, hash: u64) -> Option> + where + V: 'static; + + fn insert(&self, key: Arc, hash: u64, value: V); + + fn remove(&self, key: &Arc, hash: u64) -> Option; +} + type ErrorObject = Arc; -type WaiterValue = Option>; + +// type WaiterValue = Option>; +enum WaiterValue { + Computing, + Ready(Result), + ReadyNone, + // https://github.com/moka-rs/moka/issues/43 + InitFuturePanicked, +} + +impl fmt::Debug for WaiterValue { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + WaiterValue::Computing => write!(f, "Computing"), + WaiterValue::Ready(_) => write!(f, "Ready"), + WaiterValue::ReadyNone => write!(f, "ReadyNone"), + WaiterValue::InitFuturePanicked => write!(f, "InitFuturePanicked"), + } + } +} + type Waiter = TrioArc>>; pub(crate) enum InitResult { @@ -20,6 +53,14 @@ pub(crate) enum InitResult { InitErr(Arc), } +pub(crate) enum ComputeResult { + Inserted(V), + Updated(V), + Removed(V), + Nop(Option), + EvalErr(E), +} + pub(crate) struct ValueInitializer { // TypeId is the type ID of the concrete error type of generic type E in the // try_get_with method. We use the type ID as a part of the key to ensure that @@ -70,7 +111,7 @@ where let (w_key, w_hash) = self.waiter_key_hash(key, type_id); - let waiter = TrioArc::new(RwLock::new(None)); + let waiter = TrioArc::new(RwLock::new(WaiterValue::Computing)); let mut lock = waiter.write(); loop { @@ -82,10 +123,10 @@ where // Somebody else's waiter already exists, so wait for its result to become available. let waiter_result = existing_waiter.read(); match &*waiter_result { - Some(Ok(value)) => return ReadExisting(value.clone()), - Some(Err(e)) => return InitErr(Arc::clone(e).downcast().unwrap()), - // None means somebody else's init closure has been panicked. - None => { + WaiterValue::Ready(Ok(value)) => return ReadExisting(value.clone()), + WaiterValue::Ready(Err(e)) => return InitErr(Arc::clone(e).downcast().unwrap()), + // Somebody else's init closure has been panicked. + WaiterValue::InitFuturePanicked => { retries += 1; assert!( retries < MAX_RETRIES, @@ -96,6 +137,11 @@ where // Retry from the beginning. continue; } + // Unexpected state. + s @ (WaiterValue::Computing | WaiterValue::ReadyNone) => panic!( + "Got unexpected state `{s:?}` after resolving `init` future. \ + This might be a bug in Moka" + ), } } @@ -105,7 +151,7 @@ where if let Some(value) = get() { // Yes. Set the waiter value, remove our waiter, and return // the existing value. - *lock = Some(Ok(value.clone())); + *lock = WaiterValue::Ready(Ok(value.clone())); self.remove_waiter(w_key, w_hash); return InitResult::ReadExisting(value); } @@ -116,26 +162,25 @@ where match catch_unwind(AssertUnwindSafe(init)) { // Evaluated. 
Ok(value) => { - let (waiter_val, init_res) = match post_init(value) { + let init_res = match post_init(value) { Ok(value) => { insert(value.clone()); - (Some(Ok(value.clone())), InitResult::Initialized(value)) + *lock = WaiterValue::Ready(Ok(value.clone())); + InitResult::Initialized(value) } Err(e) => { let err: ErrorObject = Arc::new(e); - ( - Some(Err(Arc::clone(&err))), - InitResult::InitErr(err.downcast().unwrap()), - ) + + *lock = WaiterValue::Ready(Err(Arc::clone(&err))); + InitResult::InitErr(err.downcast().unwrap()) } }; - *lock = waiter_val; self.remove_waiter(w_key, w_hash); init_res } // Panicked. Err(payload) => { - *lock = None; + *lock = WaiterValue::InitFuturePanicked; // Remove the waiter so that others can retry. self.remove_waiter(w_key, w_hash); resume_unwind(payload); @@ -144,6 +189,109 @@ where // The write lock will be unlocked here. } + /// # Panics + /// Panics if the `init` future has been panicked. + pub(crate) fn try_compute<'a, C, F, O, E>( + &'a self, + c_key: &Arc, + c_hash: u64, + cache: &C, // Future to initialize a new value. + f: F, + post_init: fn(O) -> Result, E>, + allow_nop: bool, + ) -> ComputeResult + where + V: 'static, + C: GetOrInsert + Send + 'a, + F: FnOnce(Option>) -> O, + E: Send + Sync + 'static, + { + use std::panic::{catch_unwind, resume_unwind, AssertUnwindSafe}; + use ComputeResult::{EvalErr, Inserted, Nop, Removed, Updated}; + + let type_id = TypeId::of::(); + + let (w_key, w_hash) = self.waiter_key_hash(c_key, type_id); + + let waiter = TrioArc::new(RwLock::new(WaiterValue::Computing)); + // NOTE: We have to acquire a write lock before `try_insert_waiter`, + // so that any concurrent attempt will get our lock and wait on it. + let mut lock = waiter.write(); + + loop { + let Some(existing_waiter) = self.try_insert_waiter(w_key.clone(), w_hash, &waiter) + else { + break; + }; + + // Somebody else's waiter already exists, so wait for its result to become available. + let waiter_result = existing_waiter.read(); + match &*waiter_result { + // Unexpected state. + WaiterValue::Computing => panic!( + "Got unexpected state `Computing` after resolving `init` future. \ + This might be a bug in Moka" + ), + _ => { + // Retry from the beginning. + continue; + } + } + } + + // Our waiter was inserted. + + // Get the current value. + let maybe_entry = cache.get_entry_without_recording(c_key, c_hash); + let maybe_value = if allow_nop { + maybe_entry.as_ref().map(|ent| ent.value().clone()) + } else { + None + }; + let entry_existed = maybe_entry.is_some(); + + // Let's evaluate the `f` closure and get a future. Catching panic is safe + // here as we will not evaluate the closure again. + match catch_unwind(AssertUnwindSafe(|| f(maybe_entry))) { + // Evaluated. + Ok(op) => { + let init_res = match post_init(op) { + Ok(op) => match op { + compute::Op::Nop => Nop(maybe_value), + compute::Op::Put(value) => { + cache.insert(Arc::clone(c_key), c_hash, value.clone()); + if entry_existed { + Updated(value) + } else { + Inserted(value) + } + } + compute::Op::Remove => { + let maybe_prev_v = cache.remove(c_key, c_hash); + if let Some(prev_v) = maybe_prev_v { + Removed(prev_v) + } else { + Nop(None) + } + } + }, + Err(e) => EvalErr(e), + }; + *lock = WaiterValue::ReadyNone; + self.remove_waiter(w_key, w_hash); + init_res + } + // Panicked. + Err(payload) => { + *lock = WaiterValue::InitFuturePanicked; + // Remove the waiter so that others can retry. + self.remove_waiter(w_key, w_hash); + resume_unwind(payload); + } + } + // The lock will be unlocked here. 
+ } + /// The `post_init` function for the `get_with` method of cache. pub(crate) fn post_init_for_get_with(value: V) -> Result { Ok(value) @@ -165,6 +313,26 @@ where result } + /// The `post_init` function for the `and_upsert_with` method of cache. + pub(crate) fn post_init_for_upsert_with(value: V) -> Result, ()> { + Ok(compute::Op::Put(value)) + } + + /// The `post_init` function for the `and_compute_with` method of cache. + pub(crate) fn post_init_for_compute_with(op: compute::Op) -> Result, ()> { + Ok(op) + } + + /// The `post_init` function for the `and_try_compute_with` method of cache. + pub(crate) fn post_init_for_try_compute_with( + op: Result, E>, + ) -> Result, E> + where + E: Send + Sync + 'static, + { + op + } + /// Returns the `type_id` for `get_with` method of cache. pub(crate) fn type_id_for_get_with() -> TypeId { // NOTE: We use a regular function here instead of a const fn because TypeId From 3d16028c01c78618c2eb083f9d817d78ad6b2631 Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Sat, 6 Jan 2024 15:51:12 +0800 Subject: [PATCH 12/16] Compute API - Add doc and examples for `sync::Cache` --- Cargo.toml | 16 + examples/README.md | 47 ++- examples/append_value_sync.rs | 60 ++++ examples/bounded_counter_sync.rs | 71 +++++ examples/counter_sync.rs | 36 +++ examples/try_append_value_async.rs | 2 +- examples/try_append_value_sync.rs | 111 +++++++ src/future/cache.rs | 24 ++ src/sync/cache.rs | 1 + src/sync/entry_selector.rs | 488 +++++++++++++++++++++++++++++ src/sync/segment.rs | 2 + 11 files changed, 846 insertions(+), 12 deletions(-) create mode 100644 examples/append_value_sync.rs create mode 100644 examples/bounded_counter_sync.rs create mode 100644 examples/counter_sync.rs create mode 100644 examples/try_append_value_sync.rs diff --git a/Cargo.toml b/Cargo.toml index 08464a4b..a2a77508 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -110,6 +110,10 @@ rustdoc-args = ["--cfg", "docsrs"] name = "append_value_async" required-features = ["future"] +[[example]] +name = "append_value_sync" +required-features = ["sync"] + [[example]] name = "basics_async" required-features = ["future"] @@ -122,6 +126,10 @@ required-features = ["sync"] name = "bounded_counter_async" required-features = ["future"] +[[example]] +name = "bounded_counter_sync" +required-features = ["sync"] + [[example]] name = "cascading_drop_async" required-features = ["future"] @@ -130,6 +138,10 @@ required-features = ["future"] name = "counter_async" required-features = ["future"] +[[example]] +name = "counter_sync" +required-features = ["sync"] + [[example]] name = "eviction_listener_sync" required-features = ["sync"] @@ -141,3 +153,7 @@ required-features = ["sync"] [[example]] name = "try_append_value_async" required-features = ["future"] + +[[example]] +name = "try_append_value_sync" +required-features = ["sync"] diff --git a/examples/README.md b/examples/README.md index 0d228cde..3ffed894 100644 --- a/examples/README.md +++ b/examples/README.md @@ -13,29 +13,54 @@ Each example has a suffix `_async` or `_sync`: ## Basics of the Cache API - [basics_async](./basics_async.rs) and [basics_sync](./basics_sync.rs) - - Sharing a cache between async tasks or OS threads. + - Shares a cache between async tasks or OS threads. - Do not wrap a `Cache` with `Arc>`! Just clone the `Cache` and you are all set. - - `insert`, `get` and `invalidate` methods. + - Uses `insert`, `get` and `invalidate` methods. 
 - [size_aware_eviction_sync](./size_aware_eviction_sync.rs)
-  - Configuring the max capacity of the cache based on the total size of the cached
+  - Configures the max capacity of the cache based on the total size of the cached
     entries.
 
+## The `Entry` API
+
+Atomically inserts, updates and removes an entry from the cache depending on the
+existence of the entry.
+
+- [counter_async](./counter_async.rs) and [counter_sync](./counter_sync.rs)
+  - Atomically increments a cached `u64` by 1. If the entry does not exist, inserts
+    a new entry with the value 1.
+  - Uses `and_upsert_with` method.
+- [bounded_counter_async](./bounded_counter_async.rs) and
+  [bounded_counter_sync](./bounded_counter_sync.rs)
+  - Same as above except removing the entry when the value is 2.
+  - Uses `and_compute_with` method.
+- [append_value_async](./append_value_async.rs) and
+  [append_value_sync](./append_value_sync.rs)
+  - Atomically appends an `i32` to a cached `Arc<RwLock<Vec<i32>>>`. If the entry
+    does not exist, inserts a new entry.
+  - Uses `and_upsert_with` method.
+- [try_append_value_async](./try_append_value_async.rs) and
+  [try_append_value_sync](./try_append_value_sync.rs)
+  - Atomically reads a `char` from a reader and appends it to a cached
+    `Arc<RwLock<String>>`, but reading may fail with an early EOF.
+  - Uses `and_try_compute_with` method.
+
 ## Expiration and Eviction Listener
 
 - [eviction_listener_sync](./eviction_listener_sync.rs)
-  - Setting the `time_to_live` expiration policy.
-  - Registering a listener (closure) to be notified when an entry is evicted from
-    the cache.
-  - `insert`, `invalidate` and `invalidate_all` methods.
-  - Demonstrating when the expired entries will be actually evicted from the cache,
+  - Configures the `time_to_live` expiration policy.
+  - Registers a listener (closure) to be notified when an entry is evicted from the
+    cache.
+  - Uses `insert`, `invalidate`, `invalidate_all` and `run_pending_tasks` methods.
+  - Demonstrates when the expired entries will be actually evicted from the cache,
     and why the `run_pending_tasks` method could be important in some cases.
 - [cascading_drop_async](./cascading_drop_async.rs)
-  - Controlling the lifetime of the objects in a separate `BTreeMap` collection
-    from the cache using an eviction listener.
-  - `BTreeMap`, `Arc` and mpsc channel (multi-producer, single consumer channel).
+  - Controls the lifetime of the objects in a separate `BTreeMap` collection from
+    the cache using an eviction listener.
+  - Besides the cache APIs, uses `BTreeMap`, `Arc` and mpsc channel (multi-producer,
+    single consumer channel).
 
 ## Check out the API Documentation too!
diff --git a/examples/append_value_sync.rs b/examples/append_value_sync.rs
new file mode 100644
index 00000000..aef650a5
--- /dev/null
+++ b/examples/append_value_sync.rs
@@ -0,0 +1,60 @@
+//! This example demonstrates how to append an `i32` value to a cached `Vec`
+//! value. It uses the `and_upsert_with` method of `Cache`.
+
+use std::sync::{Arc, RwLock};
+
+use moka::{sync::Cache, Entry};
+
+fn main() {
+    // We want to store a raw value `Vec` for each `String` key. We are going to
+    // append `i32` values to the `Vec` in the cache.
+    //
+    // Note that we have to wrap the `Vec` in an `Arc<RwLock<_>>`. We need the `Arc`,
+    // an atomic reference counted shared pointer, because `and_upsert_with` method
+    // of `Cache` passes a _clone_ of the value to our closure, instead of passing a
+    // `&mut` reference. We do not want to clone the `Vec` every time we append a
+    // value to it, so we wrap it in an `Arc`. 
Then we need the `RwLock` because we + // mutate the `Vec` when we append a value to it. + // + // The reason that `and_upsert_with` cannot pass a `&mut Vec<_>` to the closure + // is because the internal concurrent hash table of `Cache` is a lock free data + // structure and does not use any mutexes. So it cannot guarantee: (1) the `&mut + // Vec<_>` is unique, and (2) it is not accessed concurrently by other threads. + let cache: Cache>>> = Cache::new(100); + + let key = "key".to_string(); + + let entry = append_to_cached_vec(&cache, &key, 1); + assert!(entry.is_fresh()); + assert!(!entry.is_updated()); + assert_eq!(*entry.into_value().read().unwrap(), &[1]); + + let entry = append_to_cached_vec(&cache, &key, 2); + assert!(entry.is_fresh()); + assert!(entry.is_updated()); + assert_eq!(*entry.into_value().read().unwrap(), &[1, 2]); + + let entry = append_to_cached_vec(&cache, &key, 3); + assert!(entry.is_fresh()); + assert!(entry.is_updated()); + assert_eq!(*entry.into_value().read().unwrap(), &[1, 2, 3]); +} + +fn append_to_cached_vec( + cache: &Cache>>>, + key: &str, + value: i32, +) -> Entry>>> { + cache.entry_by_ref(key).and_upsert_with(|maybe_entry| { + if let Some(entry) = maybe_entry { + // The entry exists, append the value to the Vec. + let v = entry.into_value(); + v.write().unwrap().push(value); + v + } else { + // The entry does not exist, insert a new Vec containing + // the value. + Arc::new(RwLock::new(vec![value])) + } + }) +} diff --git a/examples/bounded_counter_sync.rs b/examples/bounded_counter_sync.rs new file mode 100644 index 00000000..703c6f6a --- /dev/null +++ b/examples/bounded_counter_sync.rs @@ -0,0 +1,71 @@ +//! This example demonstrates how to increment a cached `u64` counter. It uses the +//! `and_compute_with` method of `Cache`. + +use moka::{ + ops::compute::{self, PerformedOp}, + sync::Cache, + Entry, +}; + +fn main() { + let cache: Cache = Cache::new(100); + let key = "key".to_string(); + + // This should insert a now counter value 1 to the cache, and return the value + // with the kind of the operation performed. + let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key); + let entry = maybe_entry.expect("An entry should be returned"); + assert_eq!(entry.into_value(), 1); + assert_eq!(performed_op, PerformedOp::Inserted); + + // This should increment the cached counter value by 1. + let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key); + let entry = maybe_entry.expect("An entry should be returned"); + assert_eq!(entry.into_value(), 2); + assert_eq!(performed_op, PerformedOp::Updated); + + // This should remove the cached counter from the cache, and returns the + // _removed_ value. + let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key); + let entry = maybe_entry.expect("An entry should be returned"); + assert_eq!(entry.into_value(), 2); + assert_eq!(performed_op, PerformedOp::Removed); + + // The key should no longer exist. + assert!(!cache.contains_key(&key)); + + // This should start over; insert a new counter value 1 to the cache. + let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key); + let entry = maybe_entry.expect("An entry should be returned"); + assert_eq!(entry.into_value(), 1); + assert_eq!(performed_op, PerformedOp::Inserted); +} + +/// Increment a cached `u64` counter. If the counter is greater than or equal to 2, +/// remove it. +/// +/// This method uses cache's `and_compute_with` method. 
+fn inclement_or_remove_counter(
+    cache: &Cache<String, u64>,
+    key: &str,
+) -> (Option<Entry<String, u64>>, compute::PerformedOp) {
+    // - If the counter does not exist, insert a new value of 1.
+    // - If the counter is less than 2, increment it by 1.
+    // - If the counter is greater than or equal to 2, remove it.
+    cache.entry_by_ref(key).and_compute_with(|maybe_entry| {
+        if let Some(entry) = maybe_entry {
+            // The entry exists.
+            let counter = entry.into_value();
+            if counter < 2 {
+                // Increment the counter by 1.
+                compute::Op::Put(counter.saturating_add(1))
+            } else {
+                // Remove the entry.
+                compute::Op::Remove
+            }
+        } else {
+            // The entry does not exist, insert a new value of 1.
+            compute::Op::Put(1)
+        }
+    })
+}
diff --git a/examples/counter_sync.rs b/examples/counter_sync.rs
new file mode 100644
index 00000000..35a5cd60
--- /dev/null
+++ b/examples/counter_sync.rs
@@ -0,0 +1,36 @@
+//! This example demonstrates how to increment a cached `u64` counter. It uses the
+//! `and_upsert_with` method of `Cache`.
+
+use moka::{sync::Cache, Entry};
+
+fn main() {
+    let cache: Cache<String, u64> = Cache::new(100);
+    let key = "key".to_string();
+
+    let entry = increment_counter(&cache, &key);
+    assert!(entry.is_fresh());
+    assert!(!entry.is_updated());
+    assert_eq!(entry.into_value(), 1);
+
+    let entry = increment_counter(&cache, &key);
+    assert!(entry.is_fresh());
+    assert!(entry.is_updated());
+    assert_eq!(entry.into_value(), 2);
+
+    let entry = increment_counter(&cache, &key);
+    assert!(entry.is_fresh());
+    assert!(entry.is_updated());
+    assert_eq!(entry.into_value(), 3);
+}
+
+fn increment_counter(cache: &Cache<String, u64>, key: &str) -> Entry<String, u64> {
+    cache.entry_by_ref(key).and_upsert_with(|maybe_entry| {
+        if let Some(entry) = maybe_entry {
+            // The entry exists, increment the value by 1.
+            entry.into_value().saturating_add(1)
+        } else {
+            // The entry does not exist, insert a new value of 1.
+            1
+        }
+    })
+}
diff --git a/examples/try_append_value_async.rs b/examples/try_append_value_async.rs
index 76ca007b..e0cb7eed 100644
--- a/examples/try_append_value_async.rs
+++ b/examples/try_append_value_async.rs
@@ -19,7 +19,7 @@ type Key = i32;
 /// The type of the cache value.
 ///
 /// We want to store a raw value `String` for each `i32` key. We are going to append
-/// `char` to the `String` value in the cache.
+/// a `char` to the `String` value in the cache.
 ///
 /// Note that we have to wrap the `String` in an `Arc<RwLock<String>>`. We need the `Arc`,
 /// an atomic reference counted shared pointer, because `and_try_compute_with` method
diff --git a/examples/try_append_value_sync.rs b/examples/try_append_value_sync.rs
new file mode 100644
index 00000000..7cb49a1a
--- /dev/null
+++ b/examples/try_append_value_sync.rs
@@ -0,0 +1,111 @@
+//! This example demonstrates how to read a `char` at a time from a reader and
+//! append it to a cached `String` value, returning an error on an early EOF. It
+//! uses the `and_try_compute_with` method of `Cache`.
+
+use std::{
+    io::{self, Cursor, Read},
+    sync::{Arc, RwLock},
+};
+
+use moka::{
+    ops::compute::{self, PerformedOp},
+    sync::Cache,
+    Entry,
+};
+
+/// The type of the cache key.
+type Key = i32;
+
+/// The type of the cache value.
+///
+/// We want to store a raw value `String` for each `i32` key. We are going to append
+/// a `char` to the `String` value in the cache.
+///
+/// Note that we have to wrap the `String` in an `Arc<RwLock<String>>`. We need the
+/// `Arc`, an atomic reference counted shared pointer, because the
+/// `and_try_compute_with` method of `Cache` passes a _clone_ of the value to our
+/// closure, instead of passing a `&mut` reference. We do not want to clone the
+/// `String` every time we append a `char` to it, so we wrap it in an `Arc`. Then we
+/// need the `RwLock` because we mutate the `String` when we append a value to it.
+///
+/// The reason that `and_try_compute_with` cannot pass a `&mut String` to the closure
+/// is because the internal concurrent hash table of `Cache` is a lock free data
+/// structure and does not use any mutexes. So it cannot guarantee: (1) the
+/// `&mut String` is unique, and (2) it is not accessed concurrently by other
+/// threads.
+type Value = Arc<RwLock<String>>;
+
+fn main() -> Result<(), std::io::Error> {
+    let cache: Cache<Key, Value> = Cache::new(100);
+
+    let key = 0;
+
+    // We are going to read a byte at a time from a byte string (`[u8; 3]`).
+    let mut reader = Cursor::new(b"abc");
+
+    // Read the first char 'a' from the reader, and insert a string "a" into the cache.
+    let (maybe_entry, performed_op) = append_to_cached_string(&cache, key, &mut reader)?;
+    let entry = maybe_entry.expect("An entry should be returned");
+    assert_eq!(*entry.into_value().read().unwrap(), "a");
+    assert_eq!(performed_op, PerformedOp::Inserted);
+
+    // Read the next char 'b' from the reader, and append it to the cached string.
+    let (maybe_entry, performed_op) = append_to_cached_string(&cache, key, &mut reader)?;
+    let entry = maybe_entry.expect("An entry should be returned");
+    assert_eq!(*entry.into_value().read().unwrap(), "ab");
+    assert_eq!(performed_op, PerformedOp::Updated);
+
+    // Read the next char 'c' from the reader, and append it to the cached string.
+    let (maybe_entry, performed_op) = append_to_cached_string(&cache, key, &mut reader)?;
+    let entry = maybe_entry.expect("An entry should be returned");
+    assert_eq!(*entry.into_value().read().unwrap(), "abc");
+    assert_eq!(performed_op, PerformedOp::Updated);
+
+    // Reading should fail because there are no more chars left.
+    let err = append_to_cached_string(&cache, key, &mut reader);
+    assert_eq!(
+        err.expect_err("An error should be returned").kind(),
+        io::ErrorKind::UnexpectedEof
+    );
+
+    Ok(())
+}
+
+/// Reads a byte from the `reader`, converts it into a `char`, appends it to the
+/// cached `String` for the given `key`, and returns the resulting cached entry.
+///
+/// If reading from the `reader` fails with an IO error, it returns the error.
+///
+/// This function uses the cache's `and_try_compute_with` method.
+fn append_to_cached_string(
+    cache: &Cache<Key, Value>,
+    key: Key,
+    reader: &mut impl Read,
+) -> io::Result<(Option<Entry<Key, Value>>, PerformedOp)> {
+    cache.entry(key).and_try_compute_with(|maybe_entry| {
+        // Read a char from the reader.
+        let mut buf = [0u8];
+        let len = reader.read(&mut buf)?;
+        if len == 0 {
+            // No more char left.
+            return Err(io::Error::new(
+                io::ErrorKind::UnexpectedEof,
+                "No more char left",
+            ));
+        }
+        let char =
+            char::from_u32(buf[0] as u32).expect("An ASCII byte should be converted into a char");
+
+        // Check if the entry already exists.
+        if let Some(entry) = maybe_entry {
+            // The entry exists, append the char to the String.
+            let v = entry.into_value();
+            v.write().unwrap().push(char);
+            Ok(compute::Op::Put(v))
+        } else {
+            // The entry does not exist, insert a new String containing
+            // the char.
+            let v = RwLock::new(String::from(char));
+            Ok(compute::Op::Put(Arc::new(v)))
+        }
+    })
+}
diff --git a/src/future/cache.rs b/src/future/cache.rs
index edea7ee0..a4e82ab0 100644
--- a/src/future/cache.rs
+++ b/src/future/cache.rs
@@ -1063,6 +1063,7 @@ where
             .into_value()
     }
 
+    /// TODO: Remove this in v0.13.0.
/// Deprecated, replaced with /// [`entry()::or_insert_with_if()`](./struct.OwnedKeyEntrySelector.html#method.or_insert_with_if) #[deprecated(since = "0.10.0", note = "Replaced with `entry().or_insert_with_if()`")] @@ -2159,6 +2160,7 @@ mod tests { common::time::Clock, future::FutureExt, notification::{ListenerFuture, RemovalCause}, + ops::compute, policy::test_utils::ExpiryCallCounters, Expiry, }; @@ -2196,6 +2198,17 @@ mod tests { is_send(cache.try_get_with_by_ref(&(), async { Err(()) })); // entry fns + is_send( + cache + .entry(()) + .and_compute_with(|_| async { compute::Op::Nop }), + ); + is_send( + cache + .entry(()) + .and_try_compute_with(|_| async { Ok(compute::Op::Nop) as Result<_, Infallible> }), + ); + is_send(cache.entry(()).and_upsert_with(|_| async {})); is_send(cache.entry(()).or_default()); is_send(cache.entry(()).or_insert(())); is_send(cache.entry(()).or_insert_with(async {})); @@ -2204,6 +2217,17 @@ mod tests { is_send(cache.entry(()).or_try_insert_with(async { Err(()) })); // entry_by_ref fns + is_send( + cache + .entry_by_ref(&()) + .and_compute_with(|_| async { compute::Op::Nop }), + ); + is_send( + cache + .entry_by_ref(&()) + .and_try_compute_with(|_| async { Ok(compute::Op::Nop) as Result<_, Infallible> }), + ); + is_send(cache.entry_by_ref(&()).and_upsert_with(|_| async {})); is_send(cache.entry_by_ref(&()).or_default()); is_send(cache.entry_by_ref(&()).or_insert(())); is_send(cache.entry_by_ref(&()).or_insert_with(async {})); diff --git a/src/sync/cache.rs b/src/sync/cache.rs index 618c7afb..6aac4851 100644 --- a/src/sync/cache.rs +++ b/src/sync/cache.rs @@ -966,6 +966,7 @@ where .into_value() } + /// TODO: Remove this in v0.13.0. /// Deprecated, replaced with /// [`entry()::or_insert_with_if()`](./struct.OwnedKeyEntrySelector.html#method.or_insert_with_if) #[deprecated(since = "0.10.0", note = "Replaced with `entry().or_insert_with_if()`")] diff --git a/src/sync/entry_selector.rs b/src/sync/entry_selector.rs index c6385fc3..d94ff419 100644 --- a/src/sync/entry_selector.rs +++ b/src/sync/entry_selector.rs @@ -38,6 +38,120 @@ where } } + /// Performs a compute operation on a cached entry by using the given closure + /// `f`. A compute operation is either put, remove or no-operation (nop). + /// + /// The closure `f` should take the current entry of `Option>` for + /// the key, and return an `ops::compute::Op` enum. + /// + /// This method works as the followings: + /// + /// 1. Apply the closure `f` to the current cached `Entry`, and get an + /// `ops::compute::Op`. + /// 2. Execute the op on the cache: + /// - `Op::Put(V)`: Put the new value `V` to the cache. + /// - `Op::Remove`: Remove the current cached entry. + /// - `Op::Nop`: Do nothing. + /// 3. Return an `(Option, ops::compute::PerformedOp)` as the followings: + /// + /// | [`Op`] | `Entry` to return | [`PerformedOp`] | + /// |:--------- |:--------------------------- |:----------------------- | + /// | `Put(V)` | The inserted/updated entry | `Inserted` or `Updated` | + /// | `Remove` | The _removed_ entry | `Removed` | + /// | `Nop` | The current entry or `None` | `Nop` | + /// + /// **Notes:** + /// + /// - `Op::Put(V)`: `PerformedOp::Updated` is returned when the key already + /// existed in the cache. It is _not_ related to whether the value was actually + /// updated or not. It can be replaced with the same value. + /// - `Op::Remove`: Unlike other ops, the _removed_ entry is returned. If you mix + /// `Remove` with other ops, ensure to check whether the performed op is + /// `Removed` or not. 
+ /// + /// # Similar Methods + /// + /// - If you want the `Future` resolve to `Result>` instead of `Op`, and + /// upsert only when resolved to `Ok(V)`, use the [`and_try_compute_with`] + /// method. + /// - If you only want to put, use the [`and_upsert_with`] method. + /// + /// [`Entry`]: ../struct.Entry.html + /// [`Op`]: ../ops/compute/enum.Op.html + /// [`PerformedOp`]: ../ops/compute/enum.PerformedOp.html + /// [`and_upsert_with`]: #method.and_upsert_with + /// [`and_try_compute_with`]: #method.and_try_compute_with + /// + /// # Example + /// + /// ```rust + /// use moka::{ + /// sync::Cache, + /// ops::compute::{self, PerformedOp}, + /// Entry, + /// }; + /// + /// let cache: Cache = Cache::new(100); + /// let key = "key1".to_string(); + /// + /// /// Increment a cached `u64` counter. If the counter is greater than or + /// /// equal to 2, remove it. + /// fn inclement_or_remove_counter( + /// cache: &Cache, + /// key: &str, + /// ) -> (Option>, compute::PerformedOp) { + /// cache + /// .entry(key.to_string()) + /// .and_compute_with(|maybe_entry| { + /// if let Some(entry) = maybe_entry { + /// let counter = entry.into_value(); + /// if counter < 2 { + /// compute::Op::Put(counter.saturating_add(1)) // Update + /// } else { + /// compute::Op::Remove // Remove + /// } + /// } else { + /// compute::Op::Put(1) // Insert + /// } + /// }) + /// } + /// + /// // This should insert a now counter value 1 to the cache, and return the + /// // value with the kind of the operation performed. + /// let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key); + /// let entry = maybe_entry.expect("An entry should be returned"); + /// assert_eq!(entry.into_value(), 1); + /// assert_eq!(performed_op, PerformedOp::Inserted); + /// + /// // This should increment the cached counter value by 1. + /// let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key); + /// let entry = maybe_entry.expect("An entry should be returned"); + /// assert_eq!(entry.into_value(), 2); + /// assert_eq!(performed_op, PerformedOp::Updated); + /// + /// // This should remove the cached counter from the cache, and returns the + /// // _removed_ value. + /// let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key); + /// let entry = maybe_entry.expect("An entry should be returned"); + /// assert_eq!(entry.into_value(), 2); + /// assert_eq!(performed_op, PerformedOp::Removed); + /// + /// // The key should no longer exist. + /// assert!(!cache.contains_key(&key)); + /// + /// // This should start over; insert a new counter value 1 to the cache. + /// let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key); + /// let entry = maybe_entry.expect("An entry should be returned"); + /// assert_eq!(entry.into_value(), 1); + /// assert_eq!(performed_op, PerformedOp::Inserted); + /// ``` + /// + /// # Concurrent calls on the same key + /// + /// This method guarantees that concurrent calls on the same key are executed + /// serially. That is, `and_compute_with` calls on the same key never run + /// concurrently. The calls are serialized by the order of their invocation. It + /// uses a key-level lock to achieve this. pub fn and_compute_with(self, f: F) -> (Option>, compute::PerformedOp) where F: FnOnce(Option>) -> compute::Op, @@ -46,6 +160,64 @@ where self.cache.compute_with_hash_and_fun(key, self.hash, f) } + /// Performs a compute operation on a cached entry by using the given closure + /// `f`. A compute operation is either put, remove or no-operation (nop). 
+    ///
+    /// The closure `f` should take the current entry of `Option<Entry<K, V>>` for
+    /// the key, and return a `Result<compute::Op<V>, E>`.
+    ///
+    /// This method works as follows:
+    ///
+    /// 1. Apply the closure `f` to the current cached `Entry`, and get a
+    ///    `Result<compute::Op<V>, E>`.
+    /// 2. If resolved to `Err(E)`, return it.
+    /// 3. Else, execute the op on the cache:
+    ///    - `Ok(Op::Put(V))`: Put the new value `V` to the cache.
+    ///    - `Ok(Op::Remove)`: Remove the current cached entry.
+    ///    - `Ok(Op::Nop)`: Do nothing.
+    /// 4. Return an `Ok((Option<Entry>, ops::compute::PerformedOp))` as
+    ///    follows:
+    ///
+    /// | [`Op`]    | `Entry` to return           | [`PerformedOp`]         |
+    /// |:--------- |:--------------------------- |:----------------------- |
+    /// | `Put(V)`  | The inserted/updated entry  | `Inserted` or `Updated` |
+    /// | `Remove`  | The _removed_ entry         | `Removed`               |
+    /// | `Nop`     | The current entry or `None` | `Nop`                   |
+    ///
+    /// **Notes:**
+    ///
+    /// - `Ok(Op::Put(V))`: `PerformedOp::Updated` is returned when the key already
+    ///   existed in the cache. It is _not_ related to whether the value was actually
+    ///   updated or not. It can be replaced with the same value.
+    /// - `Ok(Op::Remove)`: Unlike other ops, the _removed_ entry is returned. If you
+    ///   mix `Remove` with other ops, ensure to check whether the performed op is
+    ///   `Removed` or not.
+    ///
+    /// # Similar Methods
+    ///
+    /// - If you want the closure to return an `Op<V>` instead of a `Result<Op<V>, E>`,
+    ///   use the [`and_compute_with`] method.
+    /// - If you only want to put, use the [`and_upsert_with`] method.
+    ///
+    /// [`Entry`]: ../struct.Entry.html
+    /// [`Op`]: ../ops/compute/enum.Op.html
+    /// [`PerformedOp`]: ../ops/compute/enum.PerformedOp.html
+    /// [`and_upsert_with`]: #method.and_upsert_with
+    /// [`and_compute_with`]: #method.and_compute_with
+    ///
+    /// # Example
+    ///
+    /// See [`try_append_value_sync.rs`] in the `examples` directory.
+    ///
+    /// [`try_append_value_sync.rs`]:
+    /// https://github.com/moka-rs/moka/tree/main/examples/try_append_value_sync.rs
+    ///
+    /// # Concurrent calls on the same key
+    ///
+    /// This method guarantees that concurrent calls on the same key are executed
+    /// serially. That is, `and_try_compute_with` calls on the same key never run
+    /// concurrently. The calls are serialized by the order of their invocation. It
+    /// uses a key-level lock to achieve this.
     pub fn and_try_compute_with<F, E>(
         self,
         f: F,
     ) -> Result<(Option<Entry<K, V>>, compute::PerformedOp), E>
     where
         F: FnOnce(Option<Entry<K, V>>) -> Result<compute::Op<V>, E>,
         E: Send + Sync + 'static,
     {
         let key = Arc::new(self.owned_key);
         self.cache.try_compute_with_hash_and_fun(key, self.hash, f)
     }
 
+    /// Performs an upsert of an [`Entry`] by using the given closure `f`. The word
+    /// "upsert" here means "update" or "insert".
+    ///
+    /// The closure `f` should take the current entry of `Option<Entry<K, V>>` for
+    /// the key, and return a new value `V`.
+    ///
+    /// This method works as follows:
+    ///
+    /// 1. Apply the closure `f` to the current cached `Entry`, and get a new value
+    ///    `V`.
+    /// 2. Upsert the new value to the cache.
+    /// 3. Return the `Entry` having the upserted value.
+    ///
+    /// # Similar Methods
+    ///
+    /// - If you want to optionally upsert, that is to upsert only when certain
+    ///   conditions are met, use the [`and_compute_with`] method.
+    /// - If you want to fallibly upsert, that is to have the closure return a
+    ///   `Result<V, E>` instead of `V`, and upsert only when it returns `Ok(V)`, use
+    ///   the [`and_try_compute_with`] method.
+ /// + /// [`Entry`]: ../struct.Entry.html + /// [`and_compute_with`]: #method.and_compute_with + /// [`and_try_compute_with`]: #method.and_try_compute_with + /// + /// # Example + /// + /// ```rust + /// use moka::sync::Cache; + /// + /// let cache: Cache = Cache::new(100); + /// let key = "key1".to_string(); + /// + /// let entry = cache + /// .entry(key.clone()) + /// .and_upsert_with(|maybe_entry| { + /// if let Some(entry) = maybe_entry { + /// entry.into_value().saturating_add(1) // Update + /// } else { + /// 1 // Insert + /// } + /// }); + /// // It was not an update. + /// assert!(!entry.is_updated()); + /// assert_eq!(entry.key(), &key); + /// assert_eq!(entry.into_value(), 1); + /// + /// let entry = cache + /// .entry(key.clone()) + /// .and_upsert_with(|maybe_entry| { + /// if let Some(entry) = maybe_entry { + /// entry.into_value().saturating_add(1) + /// } else { + /// 1 + /// } + /// }); + /// // It was an update. + /// assert!(entry.is_updated()); + /// assert_eq!(entry.key(), &key); + /// assert_eq!(entry.into_value(), 2); + /// ``` + /// + /// Note: The `is_updated` method of the `Entry` returns `true` when the key + /// already existed in the cache. It is not related to whether the value was + /// actually updated or not. It can be replaced with the same value. + /// + /// # Concurrent calls on the same key + /// + /// This method guarantees that concurrent calls on the same key are executed + /// serially. That is, `and_upsert_with` calls on the same key never run + /// concurrently. The calls are serialized by the order of their invocation. It + /// uses a key-level lock to achieve this. pub fn and_upsert_with(self, f: F) -> Entry where F: FnOnce(Option>) -> V, @@ -353,6 +597,120 @@ where } } + /// Performs a compute operation on a cached entry by using the given closure + /// `f`. A compute operation is either put, remove or no-operation (nop). + /// + /// The closure `f` should take the current entry of `Option>` for + /// the key, and return an `ops::compute::Op` enum. + /// + /// This method works as the followings: + /// + /// 1. Apply the closure `f` to the current cached `Entry`, and get an + /// `ops::compute::Op`. + /// 2. Execute the op on the cache: + /// - `Op::Put(V)`: Put the new value `V` to the cache. + /// - `Op::Remove`: Remove the current cached entry. + /// - `Op::Nop`: Do nothing. + /// 3. Return an `(Option, ops::compute::PerformedOp)` as the followings: + /// + /// | [`Op`] | `Entry` to return | [`PerformedOp`] | + /// |:--------- |:--------------------------- |:----------------------- | + /// | `Put(V)` | The inserted/updated entry | `Inserted` or `Updated` | + /// | `Remove` | The _removed_ entry | `Removed` | + /// | `Nop` | The current entry or `None` | `Nop` | + /// + /// **Notes:** + /// + /// - `Op::Put(V)`: `PerformedOp::Updated` is returned when the key already + /// existed in the cache. It is _not_ related to whether the value was actually + /// updated or not. It can be replaced with the same value. + /// - `Op::Remove`: Unlike other ops, the _removed_ entry is returned. If you mix + /// `Remove` with other ops, ensure to check whether the performed op is + /// `Removed` or not. + /// + /// # Similar Methods + /// + /// - If you want the `Future` resolve to `Result>` instead of `Op`, and + /// upsert only when resolved to `Ok(V)`, use the [`and_try_compute_with`] + /// method. + /// - If you only want to put, use the [`and_upsert_with`] method. 
+ /// + /// [`Entry`]: ../struct.Entry.html + /// [`Op`]: ../ops/compute/enum.Op.html + /// [`PerformedOp`]: ../ops/compute/enum.PerformedOp.html + /// [`and_upsert_with`]: #method.and_upsert_with + /// [`and_try_compute_with`]: #method.and_try_compute_with + /// + /// # Example + /// + /// ```rust + /// use moka::{ + /// sync::Cache, + /// ops::compute::{self, PerformedOp}, + /// Entry, + /// }; + /// + /// let cache: Cache = Cache::new(100); + /// let key = "key1".to_string(); + /// + /// /// Increment a cached `u64` counter. If the counter is greater than or + /// /// equal to 2, remove it. + /// fn inclement_or_remove_counter( + /// cache: &Cache, + /// key: &str, + /// ) -> (Option>, compute::PerformedOp) { + /// cache + /// .entry_by_ref(key) + /// .and_compute_with(|maybe_entry| { + /// if let Some(entry) = maybe_entry { + /// let counter = entry.into_value(); + /// if counter < 2 { + /// compute::Op::Put(counter.saturating_add(1)) // Update + /// } else { + /// compute::Op::Remove // Remove + /// } + /// } else { + /// compute::Op::Put(1) // Insert + /// } + /// }) + /// } + /// + /// // This should insert a now counter value 1 to the cache, and return the + /// // value with the kind of the operation performed. + /// let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key); + /// let entry = maybe_entry.expect("An entry should be returned"); + /// assert_eq!(entry.into_value(), 1); + /// assert_eq!(performed_op, PerformedOp::Inserted); + /// + /// // This should increment the cached counter value by 1. + /// let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key); + /// let entry = maybe_entry.expect("An entry should be returned"); + /// assert_eq!(entry.into_value(), 2); + /// assert_eq!(performed_op, PerformedOp::Updated); + /// + /// // This should remove the cached counter from the cache, and returns the + /// // _removed_ value. + /// let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key); + /// let entry = maybe_entry.expect("An entry should be returned"); + /// assert_eq!(entry.into_value(), 2); + /// assert_eq!(performed_op, PerformedOp::Removed); + /// + /// // The key should no longer exist. + /// assert!(!cache.contains_key(&key)); + /// + /// // This should start over; insert a new counter value 1 to the cache. + /// let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key); + /// let entry = maybe_entry.expect("An entry should be returned"); + /// assert_eq!(entry.into_value(), 1); + /// assert_eq!(performed_op, PerformedOp::Inserted); + /// ``` + /// + /// # Concurrent calls on the same key + /// + /// This method guarantees that concurrent calls on the same key are executed + /// serially. That is, `and_compute_with` calls on the same key never run + /// concurrently. The calls are serialized by the order of their invocation. It + /// uses a key-level lock to achieve this. pub fn and_compute_with(self, f: F) -> (Option>, compute::PerformedOp) where F: FnOnce(Option>) -> compute::Op, @@ -361,6 +719,64 @@ where self.cache.compute_with_hash_and_fun(key, self.hash, f) } + /// Performs a compute operation on a cached entry by using the given closure + /// `f`. A compute operation is either put, remove or no-operation (nop). + /// + /// The closure `f` should take the current entry of `Option>` for + /// the key, and return a `Result, E>`. + /// + /// This method works as the followings: + /// + /// 1. Apply the closure `f` to the current cached `Entry`, and get a + /// `Result, E>`. + /// 2. 
If resolved to `Err(E)`, return it.
+    /// 3. Else, execute the op on the cache:
+    ///    - `Ok(Op::Put(V))`: Put the new value `V` to the cache.
+    ///    - `Ok(Op::Remove)`: Remove the current cached entry.
+    ///    - `Ok(Op::Nop)`: Do nothing.
+    /// 4. Return an `Ok((Option<Entry>, ops::compute::PerformedOp))` as
+    ///    follows:
+    ///
+    /// | [`Op`]    | `Entry` to return           | [`PerformedOp`]         |
+    /// |:--------- |:--------------------------- |:----------------------- |
+    /// | `Put(V)`  | The inserted/updated entry  | `Inserted` or `Updated` |
+    /// | `Remove`  | The _removed_ entry         | `Removed`               |
+    /// | `Nop`     | The current entry or `None` | `Nop`                   |
+    ///
+    /// **Notes:**
+    ///
+    /// - `Ok(Op::Put(V))`: `PerformedOp::Updated` is returned when the key already
+    ///   existed in the cache. It is _not_ related to whether the value was actually
+    ///   updated or not. It can be replaced with the same value.
+    /// - `Ok(Op::Remove)`: Unlike other ops, the _removed_ entry is returned. If you
+    ///   mix `Remove` with other ops, ensure to check whether the performed op is
+    ///   `Removed` or not.
+    ///
+    /// # Similar Methods
+    ///
+    /// - If you want the closure to return an `Op<V>` instead of a `Result<Op<V>, E>`,
+    ///   use the [`and_compute_with`] method.
+    /// - If you only want to put, use the [`and_upsert_with`] method.
+    ///
+    /// [`Entry`]: ../struct.Entry.html
+    /// [`Op`]: ../ops/compute/enum.Op.html
+    /// [`PerformedOp`]: ../ops/compute/enum.PerformedOp.html
+    /// [`and_upsert_with`]: #method.and_upsert_with
+    /// [`and_compute_with`]: #method.and_compute_with
+    ///
+    /// # Example
+    ///
+    /// See [`try_append_value_sync.rs`] in the `examples` directory.
+    ///
+    /// [`try_append_value_sync.rs`]:
+    /// https://github.com/moka-rs/moka/tree/main/examples/try_append_value_sync.rs
+    ///
+    /// # Concurrent calls on the same key
+    ///
+    /// This method guarantees that concurrent calls on the same key are executed
+    /// serially. That is, `and_try_compute_with` calls on the same key never run
+    /// concurrently. The calls are serialized by the order of their invocation. It
+    /// uses a key-level lock to achieve this.
     pub fn and_try_compute_with<F, E>(
         self,
         f: F,
     ) -> Result<(Option<Entry<K, V>>, compute::PerformedOp), E>
     where
         F: FnOnce(Option<Entry<K, V>>) -> Result<compute::Op<V>, E>,
         E: Send + Sync + 'static,
     {
         let key = Arc::new(self.ref_key.to_owned());
         self.cache.try_compute_with_hash_and_fun(key, self.hash, f)
     }
 
+    /// Performs an upsert of an [`Entry`] by using the given closure `f`. The word
+    /// "upsert" here means "update" or "insert".
+    ///
+    /// The closure `f` should take the current entry of `Option<Entry<K, V>>` for
+    /// the key, and return a new value `V`.
+    ///
+    /// This method works as follows:
+    ///
+    /// 1. Apply the closure `f` to the current cached `Entry`, and get a new value
+    ///    `V`.
+    /// 2. Upsert the new value to the cache.
+    /// 3. Return the `Entry` having the upserted value.
+    ///
+    /// # Similar Methods
+    ///
+    /// - If you want to optionally upsert, that is to upsert only when certain
+    ///   conditions are met, use the [`and_compute_with`] method.
+    /// - If you want to fallibly upsert, that is to have the closure return a
+    ///   `Result<V, E>` instead of `V`, and upsert only when it returns `Ok(V)`, use
+    ///   the [`and_try_compute_with`] method.
+ /// + /// [`Entry`]: ../struct.Entry.html + /// [`and_compute_with`]: #method.and_compute_with + /// [`and_try_compute_with`]: #method.and_try_compute_with + /// + /// # Example + /// + /// ```rust + /// use moka::sync::Cache; + /// + /// let cache: Cache = Cache::new(100); + /// let key = "key1".to_string(); + /// + /// let entry = cache + /// .entry_by_ref(&key) + /// .and_upsert_with(|maybe_entry| { + /// if let Some(entry) = maybe_entry { + /// entry.into_value().saturating_add(1) // Update + /// } else { + /// 1 // Insert + /// } + /// }); + /// // It was not an update. + /// assert!(!entry.is_updated()); + /// assert_eq!(entry.key(), &key); + /// assert_eq!(entry.into_value(), 1); + /// + /// let entry = cache + /// .entry_by_ref(&key) + /// .and_upsert_with(|maybe_entry| { + /// if let Some(entry) = maybe_entry { + /// entry.into_value().saturating_add(1) + /// } else { + /// 1 + /// } + /// }); + /// // It was an update. + /// assert!(entry.is_updated()); + /// assert_eq!(entry.key(), &key); + /// assert_eq!(entry.into_value(), 2); + /// ``` + /// + /// Note: The `is_updated` method of the `Entry` returns `true` when the key + /// already existed in the cache. It is not related to whether the value was + /// actually updated or not. It can be replaced with the same value. + /// + /// # Concurrent calls on the same key + /// + /// This method guarantees that concurrent calls on the same key are executed + /// serially. That is, `and_upsert_with` calls on the same key never run + /// concurrently. The calls are serialized by the order of their invocation. It + /// uses a key-level lock to achieve this. pub fn and_upsert_with(self, f: F) -> Entry where F: FnOnce(Option>) -> V, diff --git a/src/sync/segment.rs b/src/sync/segment.rs index 4f84f404..aafdc777 100644 --- a/src/sync/segment.rs +++ b/src/sync/segment.rs @@ -284,12 +284,14 @@ where RefKeyEntrySelector::new(key, hash, cache) } + /// TODO: Remove this in v0.13.0. /// Deprecated, replaced with [`get_with`](#method.get_with) #[deprecated(since = "0.8.0", note = "Replaced with `get_with`")] pub fn get_or_insert_with(&self, key: K, init: impl FnOnce() -> V) -> V { self.get_with(key, init) } + /// TODO: Remove this in v0.13.0. /// Deprecated, replaced with [`try_get_with`](#method.try_get_with) #[deprecated(since = "0.8.0", note = "Replaced with `try_get_with`")] pub fn get_or_try_insert_with(&self, key: K, init: F) -> Result> From 3db436f2939b678b519b724ad31d0c95e032a4b0 Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Sat, 6 Jan 2024 16:21:50 +0800 Subject: [PATCH 13/16] Bump the version to v0.12.3 Also update the change log. --- CHANGELOG.md | 15 +++++++++++++++ Cargo.toml | 2 +- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 09d64a46..5eb75834 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,19 @@ # Moka Cache — Change Log +## Version 0.12.3 + +### Added + +- Added the upsert and compute methods for modifying a cached entry + ([#370][gh-pull-0370]): + - Now the `entry` or `entry_by_ref` APIs have the following methods: + - `and_upsert_with` method to insert or update the entry. + - `and_compute_with` method to insert, update, remove or do nothing on the + entry. + - `and_try_compute_with` method, which is similar to above but returns + `Result`. + + ## Version 0.12.2 ### Fixed @@ -781,6 +795,7 @@ The minimum supported Rust version (MSRV) is now 1.51.0 (Mar 25, 2021). 
[gh-issue-0034]: https://github.com/moka-rs/moka/issues/34/ [gh-issue-0031]: https://github.com/moka-rs/moka/issues/31/ +[gh-pull-0370]: https://github.com/moka-rs/moka/pull/370/ [gh-pull-0363]: https://github.com/moka-rs/moka/pull/363/ [gh-pull-0350]: https://github.com/moka-rs/moka/pull/350/ [gh-pull-0348]: https://github.com/moka-rs/moka/pull/348/ diff --git a/Cargo.toml b/Cargo.toml index a2a77508..ae460947 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "moka" -version = "0.12.2" +version = "0.12.3" edition = "2021" # Rust 1.65 was released on Nov 3, 2022. rust-version = "1.65" From 6eff2d330e23428714c51ef2cba226f24afc19e5 Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Sat, 6 Jan 2024 18:46:17 +0800 Subject: [PATCH 14/16] Compute API - Change the compute family methods to record a read access to the entry Also improve the docs and source code comments of the compute family methods. --- src/common/entry.rs | 6 +++++- src/future/cache.rs | 4 ++-- src/future/value_initializer.rs | 25 ++++++++++++++--------- src/sync/cache.rs | 8 ++++---- src/sync/value_initializer.rs | 35 +++++++++++++++++++-------------- 5 files changed, 47 insertions(+), 31 deletions(-) diff --git a/src/common/entry.rs b/src/common/entry.rs index 99d40a4d..9b194746 100644 --- a/src/common/entry.rs +++ b/src/common/entry.rs @@ -76,7 +76,11 @@ impl Entry { self.is_fresh } - /// Returns `true` if the value in this `Entry` replaced an old cached value. + /// Returns `true` if the value in this `Entry` was already cached and replaced + /// with a new value. + /// + /// Note that the new value can be the same as the old value. In that case, this + /// method still returns `true`. pub fn is_updated(&self) -> bool { self.is_updated } diff --git a/src/future/cache.rs b/src/future/cache.rs index a4e82ab0..a52a9202 100644 --- a/src/future/cache.rs +++ b/src/future/cache.rs @@ -2083,10 +2083,10 @@ where .map(Entry::into_value) } - async fn get_entry_without_recording(&self, key: &Arc, hash: u64) -> Option> { + async fn get_entry(&self, key: &Arc, hash: u64) -> Option> { let ignore_if = None as Option<&mut fn(&V) -> bool>; self.base - .get_with_hash(key, hash, ignore_if, true, false) + .get_with_hash(key, hash, ignore_if, true, true) .await } diff --git a/src/future/value_initializer.rs b/src/future/value_initializer.rs index aa49c438..bd04fc39 100644 --- a/src/future/value_initializer.rs +++ b/src/future/value_initializer.rs @@ -19,6 +19,8 @@ const WAITER_MAP_NUM_SEGMENTS: usize = 64; #[async_trait] pub(crate) trait GetOrInsert { + /// Gets a value for the given key without recording the access to the cache + /// policies. async fn get_without_recording( &self, key: &Arc, @@ -29,12 +31,16 @@ pub(crate) trait GetOrInsert { V: 'static, I: for<'i> FnMut(&'i V) -> bool + Send; - async fn get_entry_without_recording(&self, key: &Arc, hash: u64) -> Option> + /// Gets an entry for the given key _with_ recording the access to the cache + /// policies. + async fn get_entry(&self, key: &Arc, hash: u64) -> Option> where V: 'static; + /// Inserts a value for the given key. async fn insert(&self, key: Arc, hash: u64, value: V); + /// Removes a value for the given key. Returns the removed value. async fn remove(&self, key: &Arc, hash: u64) -> Option; } @@ -209,6 +215,7 @@ where let Some(existing_waiter) = try_insert_waiter(&self.waiters, w_key.clone(), w_hash, &waiter) else { + // Inserted. break; }; @@ -290,7 +297,7 @@ where &'a self, c_key: &Arc, c_hash: u64, - cache: &C, // Future to initialize a new value. 
+ cache: &C, f: F, post_init: fn(O) -> Result, E>, allow_nop: bool, @@ -305,9 +312,7 @@ where use ComputeResult::{EvalErr, Inserted, Nop, Removed, Updated}; let type_id = TypeId::of::(); - let (w_key, w_hash) = waiter_key_hash(&self.waiters, c_key, type_id); - let waiter = TrioArc::new(RwLock::new(WaiterValue::Computing)); // NOTE: We have to acquire a write lock before `try_insert_waiter`, // so that any concurrent attempt will get our lock and wait on it. @@ -317,10 +322,12 @@ where let Some(existing_waiter) = try_insert_waiter(&self.waiters, w_key.clone(), w_hash, &waiter) else { + // Inserted. break; }; - // Somebody else's waiter already exists, so wait for its result to become available. + // Somebody else's waiter already exists, so wait for it to finish + // (wait for it to release the write lock). let waiter_result = existing_waiter.read().await; match &*waiter_result { // Unexpected state. @@ -329,7 +336,7 @@ where This might be a bug in Moka" ), _ => { - // Retry from the beginning. + // Try to insert our waiter again. continue; } } @@ -343,7 +350,7 @@ where let waiter_guard = WaiterGuard::new(w_key, w_hash, &self.waiters, lock); // Get the current value. - let maybe_entry = cache.get_entry_without_recording(c_key, c_hash).await; + let maybe_entry = cache.get_entry(c_key, c_hash).await; let maybe_value = if allow_nop { maybe_entry.as_ref().map(|ent| ent.value().clone()) } else { @@ -351,8 +358,8 @@ where }; let entry_existed = maybe_entry.is_some(); - // Let's evaluate the `f` closure and get a future. Catching panic is safe - // here as we will not evaluate the closure again. + // Evaluate the `f` closure and get a future. Catching panic is safe here as + // we will not evaluate the closure again. let fut = match std::panic::catch_unwind(AssertUnwindSafe(|| f(maybe_entry))) { // Evaluated. Ok(fut) => fut, diff --git a/src/sync/cache.rs b/src/sync/cache.rs index 6aac4851..2ba521ab 100644 --- a/src/sync/cache.rs +++ b/src/sync/cache.rs @@ -1905,7 +1905,7 @@ where V: Clone + Send + Sync + 'static, S: BuildHasher + Clone + Send + Sync + 'static, { - fn get_entry_without_recording(&self, key: &Arc, hash: u64) -> Option> { + fn get_entry(&self, key: &Arc, hash: u64) -> Option> { let ignore_if = None as Option<&mut fn(&V) -> bool>; self.base .get_with_hash_and_ignore_if(key, hash, ignore_if, true) @@ -4033,7 +4033,7 @@ mod tests { // Spawn three threads to call `and_upsert_with` for the same key and each // task increments the current value by 1. Ensure the key-level lock is - // working by verifying the value is 3 after all tasks finish. + // working by verifying the value is 3 after all threads finish. // // | | thread 1 | thread 2 | thread 3 | // |--------|----------|----------|----------| @@ -4103,7 +4103,7 @@ mod tests { const KEY: u32 = 0; // Spawn six threads to call `and_compute_with` for the same key. Ensure the - // key-level lock is working by verifying the value after all tasks finish. + // key-level lock is working by verifying the value after all threads finish. // // | | thread 1 | thread 2 | thread 3 | thread 4 | thread 5 | thread 6 | // |---------|------------|---------------|------------|----------|------------|----------| @@ -4271,7 +4271,7 @@ mod tests { const KEY: u32 = 0; // Spawn four threads to call `and_try_compute_with` for the same key. Ensure - // the key-level lock is working by verifying the value after all tasks + // the key-level lock is working by verifying the value after all threads // finish. 
// // | | thread 1 | thread 2 | thread 3 | thread 4 | diff --git a/src/sync/value_initializer.rs b/src/sync/value_initializer.rs index 88061132..d9832ec7 100644 --- a/src/sync/value_initializer.rs +++ b/src/sync/value_initializer.rs @@ -14,12 +14,16 @@ use super::{ComputeNone, OptionallyNone}; const WAITER_MAP_NUM_SEGMENTS: usize = 64; pub(crate) trait GetOrInsert { - fn get_entry_without_recording(&self, key: &Arc, hash: u64) -> Option> + /// Gets an entry for the given key _with_ recording the access to the cache + /// policies. + fn get_entry(&self, key: &Arc, hash: u64) -> Option> where V: 'static; + /// Inserts a value for the given key. fn insert(&self, key: Arc, hash: u64, value: V); + /// Removes a value for the given key. Returns the removed value. fn remove(&self, key: &Arc, hash: u64) -> Option; } @@ -31,7 +35,7 @@ enum WaiterValue { Ready(Result), ReadyNone, // https://github.com/moka-rs/moka/issues/43 - InitFuturePanicked, + InitClosurePanicked, } impl fmt::Debug for WaiterValue { @@ -40,7 +44,7 @@ impl fmt::Debug for WaiterValue { WaiterValue::Computing => write!(f, "Computing"), WaiterValue::Ready(_) => write!(f, "Ready"), WaiterValue::ReadyNone => write!(f, "ReadyNone"), - WaiterValue::InitFuturePanicked => write!(f, "InitFuturePanicked"), + WaiterValue::InitClosurePanicked => write!(f, "InitFuturePanicked"), } } } @@ -117,6 +121,7 @@ where loop { let Some(existing_waiter) = self.try_insert_waiter(w_key.clone(), w_hash, &waiter) else { + // Inserted. break; }; @@ -126,7 +131,7 @@ where WaiterValue::Ready(Ok(value)) => return ReadExisting(value.clone()), WaiterValue::Ready(Err(e)) => return InitErr(Arc::clone(e).downcast().unwrap()), // Somebody else's init closure has been panicked. - WaiterValue::InitFuturePanicked => { + WaiterValue::InitClosurePanicked => { retries += 1; assert!( retries < MAX_RETRIES, @@ -180,7 +185,7 @@ where } // Panicked. Err(payload) => { - *lock = WaiterValue::InitFuturePanicked; + *lock = WaiterValue::InitClosurePanicked; // Remove the waiter so that others can retry. self.remove_waiter(w_key, w_hash); resume_unwind(payload); @@ -190,12 +195,12 @@ where } /// # Panics - /// Panics if the `init` future has been panicked. + /// Panics if the `init` closure has been panicked. pub(crate) fn try_compute<'a, C, F, O, E>( &'a self, c_key: &Arc, c_hash: u64, - cache: &C, // Future to initialize a new value. + cache: &C, f: F, post_init: fn(O) -> Result, E>, allow_nop: bool, @@ -210,9 +215,7 @@ where use ComputeResult::{EvalErr, Inserted, Nop, Removed, Updated}; let type_id = TypeId::of::(); - let (w_key, w_hash) = self.waiter_key_hash(c_key, type_id); - let waiter = TrioArc::new(RwLock::new(WaiterValue::Computing)); // NOTE: We have to acquire a write lock before `try_insert_waiter`, // so that any concurrent attempt will get our lock and wait on it. @@ -221,10 +224,12 @@ where loop { let Some(existing_waiter) = self.try_insert_waiter(w_key.clone(), w_hash, &waiter) else { + // Inserted. break; }; - // Somebody else's waiter already exists, so wait for its result to become available. + // Somebody else's waiter already exists, so wait for it to finish + // (wait for it to release the write lock). let waiter_result = existing_waiter.read(); match &*waiter_result { // Unexpected state. @@ -233,7 +238,7 @@ where This might be a bug in Moka" ), _ => { - // Retry from the beginning. + // Try to insert our waiter again. continue; } } @@ -242,7 +247,7 @@ where // Our waiter was inserted. // Get the current value. 
- let maybe_entry = cache.get_entry_without_recording(c_key, c_hash); + let maybe_entry = cache.get_entry(c_key, c_hash); let maybe_value = if allow_nop { maybe_entry.as_ref().map(|ent| ent.value().clone()) } else { @@ -250,8 +255,8 @@ where }; let entry_existed = maybe_entry.is_some(); - // Let's evaluate the `f` closure and get a future. Catching panic is safe - // here as we will not evaluate the closure again. + // Evaluate the `f` closure. Catching panic is safe here as we will not + // evaluate the closure again. match catch_unwind(AssertUnwindSafe(|| f(maybe_entry))) { // Evaluated. Ok(op) => { @@ -283,7 +288,7 @@ where } // Panicked. Err(payload) => { - *lock = WaiterValue::InitFuturePanicked; + *lock = WaiterValue::InitClosurePanicked; // Remove the waiter so that others can retry. self.remove_waiter(w_key, w_hash); resume_unwind(payload); From aee7ebe8d12448e7bd22bc6b530a39b2bfe05315 Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Sun, 7 Jan 2024 18:02:31 +0800 Subject: [PATCH 15/16] Compute API - Change the return type of the entry `and_compute_with` and `and_try_compute_with` methods --- examples/append_value_async.rs | 8 +- examples/append_value_sync.rs | 6 +- examples/bounded_counter_async.rs | 43 +++--- examples/bounded_counter_sync.rs | 44 +++--- examples/counter_async.rs | 6 +- examples/counter_sync.rs | 6 +- examples/try_append_value_async.rs | 30 ++-- examples/try_append_value_sync.rs | 30 ++-- src/common/entry.rs | 25 ++-- src/future/cache.rs | 178 ++++++++--------------- src/future/entry_selector.rs | 226 ++++++++++++----------------- src/future/value_initializer.rs | 100 +++++++------ src/ops.rs | 67 ++++++--- src/sync/cache.rs | 213 ++++++++------------------- src/sync/entry_selector.rs | 224 ++++++++++++---------------- src/sync/value_initializer.rs | 112 ++++++++------ 16 files changed, 591 insertions(+), 727 deletions(-) diff --git a/examples/append_value_async.rs b/examples/append_value_async.rs index 4d48c649..b26f9984 100644 --- a/examples/append_value_async.rs +++ b/examples/append_value_async.rs @@ -27,18 +27,20 @@ async fn main() { let key = "key".to_string(); let entry = append_to_cached_vec(&cache, &key, 1).await; + // It was not an update. + assert!(!entry.is_old_value_replaced()); assert!(entry.is_fresh()); - assert!(!entry.is_updated()); assert_eq!(*entry.into_value().read().await, &[1]); let entry = append_to_cached_vec(&cache, &key, 2).await; assert!(entry.is_fresh()); - assert!(entry.is_updated()); + // It was an update. 
+ assert!(entry.is_old_value_replaced()); assert_eq!(*entry.into_value().read().await, &[1, 2]); let entry = append_to_cached_vec(&cache, &key, 3).await; assert!(entry.is_fresh()); - assert!(entry.is_updated()); + assert!(entry.is_old_value_replaced()); assert_eq!(*entry.into_value().read().await, &[1, 2, 3]); } diff --git a/examples/append_value_sync.rs b/examples/append_value_sync.rs index aef650a5..23db556b 100644 --- a/examples/append_value_sync.rs +++ b/examples/append_value_sync.rs @@ -26,17 +26,17 @@ fn main() { let entry = append_to_cached_vec(&cache, &key, 1); assert!(entry.is_fresh()); - assert!(!entry.is_updated()); + assert!(!entry.is_old_value_replaced()); assert_eq!(*entry.into_value().read().unwrap(), &[1]); let entry = append_to_cached_vec(&cache, &key, 2); assert!(entry.is_fresh()); - assert!(entry.is_updated()); + assert!(entry.is_old_value_replaced()); assert_eq!(*entry.into_value().read().unwrap(), &[1, 2]); let entry = append_to_cached_vec(&cache, &key, 3); assert!(entry.is_fresh()); - assert!(entry.is_updated()); + assert!(entry.is_old_value_replaced()); assert_eq!(*entry.into_value().read().unwrap(), &[1, 2, 3]); } diff --git a/examples/bounded_counter_async.rs b/examples/bounded_counter_async.rs index c173bd46..57ace065 100644 --- a/examples/bounded_counter_async.rs +++ b/examples/bounded_counter_async.rs @@ -3,8 +3,7 @@ use moka::{ future::Cache, - ops::compute::{self, PerformedOp}, - Entry, + ops::compute::{CompResult, Op}, }; #[tokio::main] @@ -12,34 +11,38 @@ async fn main() { let cache: Cache = Cache::new(100); let key = "key".to_string(); - // This should insert a now counter value 1 to the cache, and return the value + // This should insert a new counter value 1 to the cache, and return the value // with the kind of the operation performed. - let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key).await; - let entry = maybe_entry.expect("An entry should be returned"); + let result = inclement_or_remove_counter(&cache, &key).await; + let CompResult::Inserted(entry) = result else { + panic!("`Inserted` should be returned: {result:?}"); + }; assert_eq!(entry.into_value(), 1); - assert_eq!(performed_op, PerformedOp::Inserted); // This should increment the cached counter value by 1. - let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key).await; - let entry = maybe_entry.expect("An entry should be returned"); + let result = inclement_or_remove_counter(&cache, &key).await; + let CompResult::ReplacedWith(entry) = result else { + panic!("`ReplacedWith` should be returned: {result:?}"); + }; assert_eq!(entry.into_value(), 2); - assert_eq!(performed_op, PerformedOp::Updated); // This should remove the cached counter from the cache, and returns the // _removed_ value. - let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key).await; - let entry = maybe_entry.expect("An entry should be returned"); + let result = inclement_or_remove_counter(&cache, &key).await; + let CompResult::Removed(entry) = result else { + panic!("`Removed` should be returned: {result:?}"); + }; assert_eq!(entry.into_value(), 2); - assert_eq!(performed_op, PerformedOp::Removed); - // The key should no longer exist. + // The key should not exist. assert!(!cache.contains_key(&key)); // This should start over; insert a new counter value 1 to the cache. 
- let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key).await; - let entry = maybe_entry.expect("An entry should be returned"); + let result = inclement_or_remove_counter(&cache, &key).await; + let CompResult::Inserted(entry) = result else { + panic!("`Inserted` should be returned: {result:?}"); + }; assert_eq!(entry.into_value(), 1); - assert_eq!(performed_op, PerformedOp::Inserted); } /// Increment a cached `u64` counter. If the counter is greater than or equal to 2, @@ -49,7 +52,7 @@ async fn main() { async fn inclement_or_remove_counter( cache: &Cache, key: &str, -) -> (Option>, compute::PerformedOp) { +) -> CompResult { // - If the counter does not exist, insert a new value of 1. // - If the counter is less than 2, increment it by 1. // - If the counter is greater than or equal to 2, remove it. @@ -61,14 +64,14 @@ async fn inclement_or_remove_counter( let counter = entry.into_value(); if counter < 2 { // Increment the counter by 1. - compute::Op::Put(counter.saturating_add(1)) + Op::Put(counter.saturating_add(1)) } else { // Remove the entry. - compute::Op::Remove + Op::Remove } } else { // The entry does not exist, insert a new value of 1. - compute::Op::Put(1) + Op::Put(1) }; // Return a Future that is resolved to `op` immediately. std::future::ready(op) diff --git a/examples/bounded_counter_sync.rs b/examples/bounded_counter_sync.rs index 703c6f6a..d067a54f 100644 --- a/examples/bounded_counter_sync.rs +++ b/examples/bounded_counter_sync.rs @@ -2,53 +2,53 @@ //! `and_compute_with` method of `Cache`. use moka::{ - ops::compute::{self, PerformedOp}, + ops::compute::{CompResult, Op}, sync::Cache, - Entry, }; fn main() { let cache: Cache = Cache::new(100); let key = "key".to_string(); - // This should insert a now counter value 1 to the cache, and return the value + // This should insert a new counter value 1 to the cache, and return the value // with the kind of the operation performed. - let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key); - let entry = maybe_entry.expect("An entry should be returned"); + let result = inclement_or_remove_counter(&cache, &key); + let CompResult::Inserted(entry) = result else { + panic!("`Inserted` should be returned: {result:?}"); + }; assert_eq!(entry.into_value(), 1); - assert_eq!(performed_op, PerformedOp::Inserted); // This should increment the cached counter value by 1. - let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key); - let entry = maybe_entry.expect("An entry should be returned"); + let result = inclement_or_remove_counter(&cache, &key); + let CompResult::ReplacedWith(entry) = result else { + panic!("`ReplacedWith` should be returned: {result:?}"); + }; assert_eq!(entry.into_value(), 2); - assert_eq!(performed_op, PerformedOp::Updated); // This should remove the cached counter from the cache, and returns the // _removed_ value. - let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key); - let entry = maybe_entry.expect("An entry should be returned"); + let result = inclement_or_remove_counter(&cache, &key); + let CompResult::Removed(entry) = result else { + panic!("`Removed` should be returned: {result:?}"); + }; assert_eq!(entry.into_value(), 2); - assert_eq!(performed_op, PerformedOp::Removed); // The key should no longer exist. assert!(!cache.contains_key(&key)); // This should start over; insert a new counter value 1 to the cache. 
- let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key); - let entry = maybe_entry.expect("An entry should be returned"); + let result = inclement_or_remove_counter(&cache, &key); + let CompResult::Inserted(entry) = result else { + panic!("`Inserted` should be returned: {result:?}"); + }; assert_eq!(entry.into_value(), 1); - assert_eq!(performed_op, PerformedOp::Inserted); } /// Increment a cached `u64` counter. If the counter is greater than or equal to 2, /// remove it. /// /// This method uses cache's `and_compute_with` method. -fn inclement_or_remove_counter( - cache: &Cache, - key: &str, -) -> (Option>, compute::PerformedOp) { +fn inclement_or_remove_counter(cache: &Cache, key: &str) -> CompResult { // - If the counter does not exist, insert a new value of 1. // - If the counter is less than 2, increment it by 1. // - If the counter is greater than or equal to 2, remove it. @@ -58,14 +58,14 @@ fn inclement_or_remove_counter( let counter = entry.into_value(); if counter < 2 { // Increment the counter by 1. - compute::Op::Put(counter.saturating_add(1)) + Op::Put(counter.saturating_add(1)) } else { // Remove the entry. - compute::Op::Remove + Op::Remove } } else { // The entry does not exist, insert a new value of 1. - compute::Op::Put(1) + Op::Put(1) } }) } diff --git a/examples/counter_async.rs b/examples/counter_async.rs index 929326c5..32e53495 100644 --- a/examples/counter_async.rs +++ b/examples/counter_async.rs @@ -10,17 +10,17 @@ async fn main() { let entry = increment_counter(&cache, &key).await; assert!(entry.is_fresh()); - assert!(!entry.is_updated()); + assert!(!entry.is_old_value_replaced()); assert_eq!(entry.into_value(), 1); let entry = increment_counter(&cache, &key).await; assert!(entry.is_fresh()); - assert!(entry.is_updated()); + assert!(entry.is_old_value_replaced()); assert_eq!(entry.into_value(), 2); let entry = increment_counter(&cache, &key).await; assert!(entry.is_fresh()); - assert!(entry.is_updated()); + assert!(entry.is_old_value_replaced()); assert_eq!(entry.into_value(), 3); } diff --git a/examples/counter_sync.rs b/examples/counter_sync.rs index 35a5cd60..5508875d 100644 --- a/examples/counter_sync.rs +++ b/examples/counter_sync.rs @@ -9,17 +9,17 @@ fn main() { let entry = increment_counter(&cache, &key); assert!(entry.is_fresh()); - assert!(!entry.is_updated()); + assert!(!entry.is_old_value_replaced()); assert_eq!(entry.into_value(), 1); let entry = increment_counter(&cache, &key); assert!(entry.is_fresh()); - assert!(entry.is_updated()); + assert!(entry.is_old_value_replaced()); assert_eq!(entry.into_value(), 2); let entry = increment_counter(&cache, &key); assert!(entry.is_fresh()); - assert!(entry.is_updated()); + assert!(entry.is_old_value_replaced()); assert_eq!(entry.into_value(), 3); } diff --git a/examples/try_append_value_async.rs b/examples/try_append_value_async.rs index e0cb7eed..0e9304d8 100644 --- a/examples/try_append_value_async.rs +++ b/examples/try_append_value_async.rs @@ -5,8 +5,7 @@ use std::{io::Cursor, pin::Pin, sync::Arc}; use moka::{ future::Cache, - ops::compute::{self, PerformedOp}, - Entry, + ops::compute::{CompResult, Op}, }; use tokio::{ io::{AsyncRead, AsyncReadExt}, @@ -46,22 +45,25 @@ async fn main() -> Result<(), tokio::io::Error> { tokio::pin!(reader); // Read the first char 'a' from the reader, and insert a string "a" to the cache. 
- let (maybe_entry, performed_op) = append_to_cached_string(&cache, key, &mut reader).await?; - let entry = maybe_entry.expect("An entry should be returned"); + let result = append_to_cached_string(&cache, key, &mut reader).await?; + let CompResult::Inserted(entry) = result else { + panic!("`Inserted` should be returned: {result:?}"); + }; assert_eq!(*entry.into_value().read().await, "a"); - assert_eq!(performed_op, PerformedOp::Inserted); // Read next char 'b' from the reader, and append it the cached string. - let (maybe_entry, performed_op) = append_to_cached_string(&cache, key, &mut reader).await?; - let entry = maybe_entry.expect("An entry should be returned"); + let result = append_to_cached_string(&cache, key, &mut reader).await?; + let CompResult::ReplacedWith(entry) = result else { + panic!("`ReplacedWith` should be returned: {result:?}"); + }; assert_eq!(*entry.into_value().read().await, "ab"); - assert_eq!(performed_op, PerformedOp::Updated); // Read next char 'c' from the reader, and append it the cached string. - let (maybe_entry, performed_op) = append_to_cached_string(&cache, key, &mut reader).await?; - let entry = maybe_entry.expect("An entry should be returned"); + let result = append_to_cached_string(&cache, key, &mut reader).await?; + let CompResult::ReplacedWith(entry) = result else { + panic!("`ReplacedWith` should be returned: {result:?}"); + }; assert_eq!(*entry.into_value().read().await, "abc"); - assert_eq!(performed_op, PerformedOp::Updated); // Reading should fail as no more char left. let err = append_to_cached_string(&cache, key, &mut reader).await; @@ -83,7 +85,7 @@ async fn append_to_cached_string( cache: &Cache, key: Key, reader: &mut Pin<&mut impl AsyncRead>, -) -> Result<(Option>, PerformedOp), tokio::io::Error> { +) -> Result, tokio::io::Error> { cache .entry(key) .and_try_compute_with(|maybe_entry| async { @@ -97,12 +99,12 @@ async fn append_to_cached_string( // The entry exists, append the char to the Vec. let v = entry.into_value(); v.write().await.push(char); - Ok(compute::Op::Put(v)) + Ok(Op::Put(v)) } else { // The entry does not exist, insert a new Vec containing // the char. let v = RwLock::new(String::from(char)); - Ok(compute::Op::Put(Arc::new(v))) + Ok(Op::Put(Arc::new(v))) } }) .await diff --git a/examples/try_append_value_sync.rs b/examples/try_append_value_sync.rs index 7cb49a1a..8c07e743 100644 --- a/examples/try_append_value_sync.rs +++ b/examples/try_append_value_sync.rs @@ -7,9 +7,8 @@ use std::{ }; use moka::{ - ops::compute::{self, PerformedOp}, + ops::compute::{CompResult, Op}, sync::Cache, - Entry, }; /// The type of the cache key. @@ -43,22 +42,25 @@ fn main() -> Result<(), tokio::io::Error> { let mut reader = Cursor::new(b"abc"); // Read the first char 'a' from the reader, and insert a string "a" to the cache. - let (maybe_entry, performed_op) = append_to_cached_string(&cache, key, &mut reader)?; - let entry = maybe_entry.expect("An entry should be returned"); + let result = append_to_cached_string(&cache, key, &mut reader)?; + let CompResult::Inserted(entry) = result else { + panic!("`Inserted` should be returned: {result:?}"); + }; assert_eq!(*entry.into_value().read().unwrap(), "a"); - assert_eq!(performed_op, PerformedOp::Inserted); // Read next char 'b' from the reader, and append it the cached string. 
- let (maybe_entry, performed_op) = append_to_cached_string(&cache, key, &mut reader)?; - let entry = maybe_entry.expect("An entry should be returned"); + let result = append_to_cached_string(&cache, key, &mut reader)?; + let CompResult::ReplacedWith(entry) = result else { + panic!("`ReplacedWith` should be returned: {result:?}"); + }; assert_eq!(*entry.into_value().read().unwrap(), "ab"); - assert_eq!(performed_op, PerformedOp::Updated); // Read next char 'c' from the reader, and append it the cached string. - let (maybe_entry, performed_op) = append_to_cached_string(&cache, key, &mut reader)?; - let entry = maybe_entry.expect("An entry should be returned"); + let result = append_to_cached_string(&cache, key, &mut reader)?; + let CompResult::ReplacedWith(entry) = result else { + panic!("`ReplacedWith` should be returned: {result:?}"); + }; assert_eq!(*entry.into_value().read().unwrap(), "abc"); - assert_eq!(performed_op, PerformedOp::Updated); // Reading should fail as no more char left. let err = append_to_cached_string(&cache, key, &mut reader); @@ -80,7 +82,7 @@ fn append_to_cached_string( cache: &Cache, key: Key, reader: &mut impl Read, -) -> io::Result<(Option>, PerformedOp)> { +) -> io::Result> { cache.entry(key).and_try_compute_with(|maybe_entry| { // Read a char from the reader. let mut buf = [0u8]; @@ -100,12 +102,12 @@ fn append_to_cached_string( // The entry exists, append the char to the Vec. let v = entry.into_value(); v.write().unwrap().push(char); - Ok(compute::Op::Put(v)) + Ok(Op::Put(v)) } else { // The entry does not exist, insert a new Vec containing // the char. let v = RwLock::new(String::from(char)); - Ok(compute::Op::Put(Arc::new(v))) + Ok(Op::Put(Arc::new(v))) } }) } diff --git a/src/common/entry.rs b/src/common/entry.rs index 9b194746..a1fb820d 100644 --- a/src/common/entry.rs +++ b/src/common/entry.rs @@ -21,7 +21,7 @@ pub struct Entry { key: Option>, value: V, is_fresh: bool, - is_updated: bool, + is_old_value_replaced: bool, } impl Debug for Entry @@ -34,18 +34,23 @@ where .field("key", self.key()) .field("value", &self.value) .field("is_fresh", &self.is_fresh) - .field("is_updated", &self.is_updated) + .field("is_old_value_replaced", &self.is_old_value_replaced) .finish() } } impl Entry { - pub(crate) fn new(key: Option>, value: V, is_fresh: bool, is_updated: bool) -> Self { + pub(crate) fn new( + key: Option>, + value: V, + is_fresh: bool, + is_old_value_replaced: bool, + ) -> Self { Self { key, value, is_fresh, - is_updated, + is_old_value_replaced, } } @@ -76,12 +81,12 @@ impl Entry { self.is_fresh } - /// Returns `true` if the value in this `Entry` was already cached and replaced - /// with a new value. + /// Returns `true` if an old value existed in the cache and was replaced by the + /// value in this `Entry`. /// - /// Note that the new value can be the same as the old value. In that case, this - /// method still returns `true`. - pub fn is_updated(&self) -> bool { - self.is_updated + /// Note that the new value can be the same as the old value. This method still + /// returns `true` in that case. 
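A minimal sketch of how `is_fresh` and `is_old_value_replaced` differ after this change (the small `sync::Cache` and the key "k" below are illustrative assumptions, not taken from the patch):

    use moka::sync::Cache;

    fn main() {
        let cache: Cache<String, u32> = Cache::new(8);

        // First call: nothing was cached yet, so the value is fresh and no old
        // value was replaced.
        let e1 = cache.entry("k".to_string()).and_upsert_with(|_| 1);
        assert!(e1.is_fresh());
        assert!(!e1.is_old_value_replaced());

        // Second call: the key already existed, so the fresh value replaced the
        // old one. This holds even if the new value equals the old value.
        let e2 = cache.entry("k".to_string()).and_upsert_with(|_| 2);
        assert!(e2.is_fresh());
        assert!(e2.is_old_value_replaced());
    }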
+ pub fn is_old_value_replaced(&self) -> bool { + self.is_old_value_replaced } } diff --git a/src/future/cache.rs b/src/future/cache.rs index a52a9202..6a683b7e 100644 --- a/src/future/cache.rs +++ b/src/future/cache.rs @@ -1,12 +1,15 @@ use super::{ base_cache::BaseCache, - value_initializer::{ComputeResult, GetOrInsert, InitResult, ValueInitializer}, + value_initializer::{GetOrInsert, InitResult, ValueInitializer}, CacheBuilder, CancelGuard, Iter, OwnedKeyEntrySelector, PredicateId, RefKeyEntrySelector, WriteOp, }; use crate::{ - common::concurrent::Weigher, notification::AsyncEvictionListener, ops::compute, - policy::ExpirationPolicy, Entry, Policy, PredicateError, + common::concurrent::Weigher, + notification::AsyncEvictionListener, + ops::compute::{self, CompResult}, + policy::ExpirationPolicy, + Entry, Policy, PredicateError, }; #[cfg(feature = "unstable-debug-counters")] @@ -1836,41 +1839,19 @@ where key: Arc, hash: u64, f: F, - ) -> (Option>, compute::PerformedOp) + ) -> compute::CompResult where F: FnOnce(Option>) -> Fut, Fut: Future>, { let post_init = ValueInitializer::::post_init_for_compute_with; - match self .value_initializer - .try_compute(&key, hash, self, f, post_init, true) + .try_compute(key, hash, self, f, post_init, true) .await { - ComputeResult::Nop(maybe_value) => { - let maybe_entry = - maybe_value.map(|value| Entry::new(Some(key), value, false, false)); - (maybe_entry, compute::PerformedOp::Nop) - } - ComputeResult::Inserted(value) => { - crossbeam_epoch::pin().flush(); - let entry = Entry::new(Some(key), value, true, false); - (Some(entry), compute::PerformedOp::Inserted) - } - ComputeResult::Updated(value) => { - crossbeam_epoch::pin().flush(); - let entry = Entry::new(Some(key), value, true, true); - (Some(entry), compute::PerformedOp::Updated) - } - ComputeResult::Removed(value) => { - crossbeam_epoch::pin().flush(); - let entry = Entry::new(Some(key), value, false, false); - (Some(entry), compute::PerformedOp::Removed) - } - ComputeResult::EvalErr(_) => { - unreachable!() - } + Ok(result) => result, + Err(_) => unreachable!(), } } @@ -1879,41 +1860,16 @@ where key: Arc, hash: u64, f: F, - ) -> Result<(Option>, compute::PerformedOp), E> + ) -> Result, E> where F: FnOnce(Option>) -> Fut, Fut: Future, E>>, E: Send + Sync + 'static, { let post_init = ValueInitializer::::post_init_for_try_compute_with; - - match self - .value_initializer - .try_compute(&key, hash, self, f, post_init, true) + self.value_initializer + .try_compute(key, hash, self, f, post_init, true) .await - { - ComputeResult::Nop(maybe_value) => { - let maybe_entry = - maybe_value.map(|value| Entry::new(Some(key), value, false, false)); - Ok((maybe_entry, compute::PerformedOp::Nop)) - } - ComputeResult::Inserted(value) => { - crossbeam_epoch::pin().flush(); - let entry = Entry::new(Some(key), value, true, false); - Ok((Some(entry), compute::PerformedOp::Inserted)) - } - ComputeResult::Updated(value) => { - crossbeam_epoch::pin().flush(); - let entry = Entry::new(Some(key), value, true, true); - Ok((Some(entry), compute::PerformedOp::Updated)) - } - ComputeResult::Removed(value) => { - crossbeam_epoch::pin().flush(); - let entry = Entry::new(Some(key), value, false, false); - Ok((Some(entry), compute::PerformedOp::Removed)) - } - ComputeResult::EvalErr(e) => Err(e), - } } pub(crate) async fn upsert_with_hash_and_fun( @@ -1927,23 +1883,13 @@ where Fut: Future, { let post_init = ValueInitializer::::post_init_for_upsert_with; - match self .value_initializer - .try_compute(&key, hash, self, f, 
post_init, false) + .try_compute(key, hash, self, f, post_init, false) .await { - ComputeResult::Inserted(value) => { - crossbeam_epoch::pin().flush(); - Entry::new(Some(key), value, true, false) - } - ComputeResult::Updated(value) => { - crossbeam_epoch::pin().flush(); - Entry::new(Some(key), value, true, true) - } - ComputeResult::Nop(_) | ComputeResult::Removed(_) | ComputeResult::EvalErr(_) => { - unreachable!() - } + Ok(CompResult::Inserted(entry) | CompResult::ReplacedWith(entry)) => entry, + _ => unreachable!(), } } @@ -2173,6 +2119,7 @@ mod tests { Arc, }, time::{Duration, Instant as StdInstant}, + vec, }; use tokio::time::sleep; @@ -4584,36 +4531,41 @@ mod tests { } }; - let ((ent1, op1), (ent2, op2), (ent3, op3), (ent4, op4), (ent5, op5), (ent6, op6)) = + let (res1, res2, res3, res4, res5, res6) = futures_util::join!(task1, task2, task3, task4, task5, task6); - assert_eq!(op1, compute::PerformedOp::Inserted); - assert_eq!(op2, compute::PerformedOp::Updated); - assert_eq!(op3, compute::PerformedOp::Removed); - assert_eq!(op4, compute::PerformedOp::Nop); - assert_eq!(op5, compute::PerformedOp::Inserted); - assert_eq!(op6, compute::PerformedOp::Nop); + let compute::CompResult::Inserted(entry) = res1 else { + panic!("Expected `Inserted`. Got {res1:?}") + }; assert_eq!( - *ent1.expect("should have entry").into_value().read().await, + *entry.into_value().read().await, vec![1, 2] // The same Vec was modified by task2. ); - assert_eq!( - *ent2.expect("should have entry").into_value().read().await, - vec![1, 2] - ); - assert_eq!( - *ent3.expect("should have entry").into_value().read().await, - vec![1, 2] // Removed value - ); - assert!(ent4.is_none(),); - assert_eq!( - *ent5.expect("should have entry").into_value().read().await, - vec![5] - ); - assert_eq!( - *ent6.expect("should have entry").into_value().read().await, - vec![5] - ); + + let compute::CompResult::ReplacedWith(entry) = res2 else { + panic!("Expected `ReplacedWith`. Got {res2:?}") + }; + assert_eq!(*entry.into_value().read().await, vec![1, 2]); + + let compute::CompResult::Removed(entry) = res3 else { + panic!("Expected `Removed`. Got {res3:?}") + }; + assert_eq!(*entry.into_value().read().await, vec![1, 2]); + + let compute::CompResult::StillNone(key) = res4 else { + panic!("Expected `StillNone`. Got {res4:?}") + }; + assert_eq!(*key, KEY); + + let compute::CompResult::Inserted(entry) = res5 else { + panic!("Expected `Inserted`. Got {res5:?}") + }; + assert_eq!(*entry.into_value().read().await, vec![5]); + + let compute::CompResult::Unchanged(entry) = res6 else { + panic!("Expected `Unchanged`. Got {res6:?}") + }; + assert_eq!(*entry.into_value().read().await, vec![5]); } #[tokio::test] @@ -4712,31 +4664,27 @@ mod tests { }; let (res1, res2, res3, res4) = futures_util::join!(task1, task2, task3, task4); - let Ok((ent1, op1)) = res1 else { - panic!("res1 should be an Ok") - }; - let Ok((ent2, op2)) = res2 else { - panic!("res2 should be an Ok") - }; - assert!(res3.is_err()); - let Ok((ent4, op4)) = res4 else { - panic!("res4 should be an Ok") - }; - - assert_eq!(op1, compute::PerformedOp::Inserted); - assert_eq!(op2, compute::PerformedOp::Updated); - assert_eq!(op4, compute::PerformedOp::Removed); + let Ok(compute::CompResult::Inserted(entry)) = res1 else { + panic!("Expected `Inserted`. Got {res1:?}") + }; assert_eq!( - *ent1.expect("should have entry").into_value().read().await, + *entry.into_value().read().await, vec![1, 2] // The same Vec was modified by task2. 
); + + let Ok(compute::CompResult::ReplacedWith(entry)) = res2 else { + panic!("Expected `ReplacedWith`. Got {res2:?}") + }; + assert_eq!(*entry.into_value().read().await, vec![1, 2]); + + assert!(res3.is_err()); + + let Ok(compute::CompResult::Removed(entry)) = res4 else { + panic!("Expected `Removed`. Got {res4:?}") + }; assert_eq!( - *ent2.expect("should have entry").into_value().read().await, - vec![1, 2] - ); - assert_eq!( - *ent4.expect("should have entry").into_value().read().await, + *entry.into_value().read().await, vec![1, 2] // Removed value. ); } diff --git a/src/future/entry_selector.rs b/src/future/entry_selector.rs index 3de20ccc..0fe106b4 100644 --- a/src/future/entry_selector.rs +++ b/src/future/entry_selector.rs @@ -54,22 +54,16 @@ where /// - `Op::Put(V)`: Put the new value `V` to the cache. /// - `Op::Remove`: Remove the current cached entry. /// - `Op::Nop`: Do nothing. - /// 4. Return an `(Option, ops::compute::PerformedOp)` as the followings: + /// 4. Return an `ops::compute::CompResult` as the followings: /// - /// | [`Op`] | `Entry` to return | [`PerformedOp`] | - /// |:--------- |:--------------------------- |:----------------------- | - /// | `Put(V)` | The inserted/updated entry | `Inserted` or `Updated` | - /// | `Remove` | The _removed_ entry | `Removed` | - /// | `Nop` | The current entry or `None` | `Nop` | - /// - /// **Notes:** - /// - /// - `Op::Put(V)`: `PerformedOp::Updated` is returned when the key already - /// existed in the cache. It is _not_ related to whether the value was actually - /// updated or not. It can be replaced with the same value. - /// - `Op::Remove`: Unlike other ops, the _removed_ entry is returned. If you mix - /// `Remove` with other ops, ensure to check whether the performed op is - /// `Removed` or not. + /// | [`Op`] | [`Entry`] already exists? | [`CompResult`] | Notes | + /// |:--------- |:--- |:--------------------------- |:------------------------------- | + /// | `Put(V)` | no | `Inserted(Entry)` | The new entry is returned. | + /// | `Put(V)` | yes | `ReplacedWith(Entry)` | The new entry is returned. | + /// | `Remove` | no | `StillNone(Arc)` | | + /// | `Remove` | yes | `Removed(Entry)` | The removed entry is returned. | + /// | `Nop` | no | `StillNone(Arc)` | | + /// | `Nop` | yes | `Unchanged(Entry)` | The existing entry is returned. | /// /// # Similar Methods /// @@ -78,9 +72,9 @@ where /// method. /// - If you only want to put, use the [`and_upsert_with`] method. 
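When a caller does not need to distinguish the `CompResult` variants listed in the table above, the `into_entry` helper added in `src/ops.rs` collapses the result into an `Option<Entry<K, V>>`. A minimal sketch, assuming a small `future::Cache<String, u32>`, a literal key, and a no-op closure (all illustrative):

    use moka::{future::Cache, ops::compute::Op};

    #[tokio::main]
    async fn main() {
        let cache: Cache<String, u32> = Cache::new(8);

        // Inspect the entry without modifying it. `into_entry` yields `None`
        // here because the key was never inserted (`CompResult::StillNone`).
        let result = cache
            .entry("key".to_string())
            .and_compute_with(|_maybe_entry| std::future::ready(Op::Nop))
            .await;
        assert!(result.into_entry().is_none());
    }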
/// - /// [`Entry`]: ../struct.Entry.html + /// [`Entry`]: ../struct.Entry.html /// [`Op`]: ../ops/compute/enum.Op.html - /// [`PerformedOp`]: ../ops/compute/enum.PerformedOp.html + /// [`CompResult`]: ../ops/compute/enum.CompResult.html /// [`and_upsert_with`]: #method.and_upsert_with /// [`and_try_compute_with`]: #method.and_try_compute_with /// @@ -95,8 +89,7 @@ where /// /// use moka::{ /// future::Cache, - /// ops::compute::{self, PerformedOp}, - /// Entry, + /// ops::compute::{CompResult, Op}, /// }; /// /// #[tokio::main] @@ -109,19 +102,19 @@ where /// async fn inclement_or_remove_counter( /// cache: &Cache, /// key: &str, - /// ) -> (Option>, compute::PerformedOp) { + /// ) -> CompResult { /// cache /// .entry(key.to_string()) /// .and_compute_with(|maybe_entry| { /// let op = if let Some(entry) = maybe_entry { /// let counter = entry.into_value(); /// if counter < 2 { - /// compute::Op::Put(counter.saturating_add(1)) // Update + /// Op::Put(counter.saturating_add(1)) // Update /// } else { - /// compute::Op::Remove // Remove + /// Op::Remove /// } /// } else { - /// compute::Op::Put(1) // Insert + /// Op::Put(1) // Insert /// }; /// // Return a Future that is resolved to `op` immediately. /// std::future::ready(op) @@ -129,34 +122,38 @@ where /// .await /// } /// - /// // This should insert a now counter value 1 to the cache, and return the + /// // This should insert a new counter value 1 to the cache, and return the /// // value with the kind of the operation performed. - /// let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key).await; - /// let entry = maybe_entry.expect("An entry should be returned"); + /// let result = inclement_or_remove_counter(&cache, &key).await; + /// let CompResult::Inserted(entry) = result else { + /// panic!("`Inserted` should be returned: {result:?}"); + /// }; /// assert_eq!(entry.into_value(), 1); - /// assert_eq!(performed_op, PerformedOp::Inserted); /// /// // This should increment the cached counter value by 1. - /// let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key).await; - /// let entry = maybe_entry.expect("An entry should be returned"); + /// let result = inclement_or_remove_counter(&cache, &key).await; + /// let CompResult::ReplacedWith(entry) = result else { + /// panic!("`ReplacedWith` should be returned: {result:?}"); + /// }; /// assert_eq!(entry.into_value(), 2); - /// assert_eq!(performed_op, PerformedOp::Updated); /// /// // This should remove the cached counter from the cache, and returns the /// // _removed_ value. - /// let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key).await; - /// let entry = maybe_entry.expect("An entry should be returned"); + /// let result = inclement_or_remove_counter(&cache, &key).await; + /// let CompResult::Removed(entry) = result else { + /// panic!("`Removed` should be returned: {result:?}"); + /// }; /// assert_eq!(entry.into_value(), 2); - /// assert_eq!(performed_op, PerformedOp::Removed); /// - /// // The key should no longer exist. + /// // The key should not exist. /// assert!(!cache.contains_key(&key)); /// /// // This should start over; insert a new counter value 1 to the cache. 
- /// let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key).await; - /// let entry = maybe_entry.expect("An entry should be returned"); + /// let result = inclement_or_remove_counter(&cache, &key).await; + /// let CompResult::Inserted(entry) = result else { + /// panic!("`Inserted` should be returned: {result:?}"); + /// }; /// assert_eq!(entry.into_value(), 1); - /// assert_eq!(performed_op, PerformedOp::Inserted); /// } /// ``` /// @@ -166,7 +163,7 @@ where /// serially. That is, `and_compute_with` calls on the same key never run /// concurrently. The calls are serialized by the order of their invocation. It /// uses a key-level lock to achieve this. - pub async fn and_compute_with(self, f: F) -> (Option>, compute::PerformedOp) + pub async fn and_compute_with(self, f: F) -> compute::CompResult where F: FnOnce(Option>) -> Fut, Fut: Future>, @@ -193,23 +190,16 @@ where /// - `Ok(Op::Put(V))`: Put the new value `V` to the cache. /// - `Ok(Op::Remove)`: Remove the current cached entry. /// - `Ok(Op::Nop)`: Do nothing. - /// 5. Return a `Ok((Option, ops::compute::PerformedOp))` as the - /// followings: + /// 5. Return an `Ok(ops::compute::CompResult)` as the followings: /// - /// | [`Op`] | `Entry` to return | [`PerformedOp`] | - /// |:--------- |:--------------------------- |:----------------------- | - /// | `Put(V)` | The inserted/updated entry | `Inserted` or `Updated` | - /// | `Remove` | The _removed_ entry | `Removed` | - /// | `Nop` | The current entry or `None` | `Nop` | - /// - /// **Notes:** - /// - /// - `Ok(Op::Put(V))`: `PerformedOp::Updated` is returned when the key already - /// existed in the cache. It is _not_ related to whether the value was actually - /// updated or not. It can be replaced with the same value. - /// - `Ok(Op::Remove)`: Unlike other ops, the _removed_ entry is returned. If you - /// mix `Remove` with other ops, ensure to check whether the performed op is - /// `Removed` or not. + /// | [`Op`] | [`Entry`] already exists? | [`CompResult`] | Notes | + /// |:--------- |:--- |:--------------------------- |:------------------------------- | + /// | `Put(V)` | no | `Inserted(Entry)` | The new entry is returned. | + /// | `Put(V)` | yes | `ReplacedWith(Entry)` | The new entry is returned. | + /// | `Remove` | no | `StillNone(Arc)` | | + /// | `Remove` | yes | `Removed(Entry)` | The removed entry is returned. | + /// | `Nop` | no | `StillNone(Arc)` | | + /// | `Nop` | yes | `Unchanged(Entry)` | The existing entry is returned. | /// /// # Similar Methods /// @@ -217,9 +207,9 @@ where /// the [`and_compute_with`] method. /// - If you only want to put, use the [`and_upsert_with`] method. /// - /// [`Entry`]: ../struct.Entry.html + /// [`Entry`]: ../struct.Entry.html /// [`Op`]: ../ops/compute/enum.Op.html - /// [`PerformedOp`]: ../ops/compute/enum.PerformedOp.html + /// [`CompResult`]: ../ops/compute/enum.CompResult.html /// [`and_upsert_with`]: #method.and_upsert_with /// [`and_compute_with`]: #method.and_compute_with /// @@ -236,10 +226,7 @@ where /// serially. That is, `and_try_compute_with` calls on the same key never run /// concurrently. The calls are serialized by the order of their invocation. It /// uses a key-level lock to achieve this. - pub async fn and_try_compute_with( - self, - f: F, - ) -> Result<(Option>, compute::PerformedOp), E> + pub async fn and_try_compute_with(self, f: F) -> Result, E> where F: FnOnce(Option>) -> Fut, Fut: Future, E>>, @@ -305,7 +292,7 @@ where /// }) /// .await; /// // It was not an update. 
- /// assert!(!entry.is_updated()); + /// assert!(!entry.is_old_value_replaced()); /// assert_eq!(entry.key(), &key); /// assert_eq!(entry.into_value(), 1); /// @@ -321,16 +308,12 @@ where /// }) /// .await; /// // It was an update. - /// assert!(entry.is_updated()); + /// assert!(entry.is_old_value_replaced()); /// assert_eq!(entry.key(), &key); /// assert_eq!(entry.into_value(), 2); /// } /// ``` /// - /// Note: The `is_updated` method of the `Entry` returns `true` when the key - /// already existed in the cache. It is not related to whether the value was - /// actually updated or not. It can be replaced with the same value. - /// /// # Concurrent calls on the same key /// /// This method guarantees that concurrent calls on the same key are executed @@ -711,22 +694,16 @@ where /// - `Op::Put(V)`: Put the new value `V` to the cache. /// - `Op::Remove`: Remove the current cached entry. /// - `Op::Nop`: Do nothing. - /// 4. Return an `(Option, ops::compute::PerformedOp)` as the followings: - /// - /// | [`Op`] | `Entry` to return | [`PerformedOp`] | - /// |:--------- |:--------------------------- |:----------------------- | - /// | `Put(V)` | The inserted/updated entry | `Inserted` or `Updated` | - /// | `Remove` | The _removed_ entry | `Removed` | - /// | `Nop` | The current entry or `None` | `Nop` | - /// - /// **Notes:** + /// 4. Return an `ops::compute::CompResult` as the followings: /// - /// - `Op::Put(V)`: `PerformedOp::Updated` is returned when the key already - /// existed in the cache. It is _not_ related to whether the value was actually - /// updated or not. It can be replaced with the same value. - /// - `Op::Remove`: Unlike other ops, the _removed_ entry is returned. If you mix - /// `Remove` with other ops, ensure to check whether the performed op is - /// `Removed` or not. + /// | [`Op`] | [`Entry`] already exists? | [`CompResult`] | Notes | + /// |:--------- |:--- |:--------------------------- |:------------------------------- | + /// | `Put(V)` | no | `Inserted(Entry)` | The new entry is returned. | + /// | `Put(V)` | yes | `ReplacedWith(Entry)` | The new entry is returned. | + /// | `Remove` | no | `StillNone(Arc)` | | + /// | `Remove` | yes | `Removed(Entry)` | The removed entry is returned. | + /// | `Nop` | no | `StillNone(Arc)` | | + /// | `Nop` | yes | `Unchanged(Entry)` | The existing entry is returned. | /// /// # Similar Methods /// @@ -735,9 +712,9 @@ where /// method. /// - If you only want to put, use the [`and_upsert_with`] method. 
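When the closure always returns `Op::Put(..)`, the result can only be `Inserted` or `ReplacedWith`, so the `unwrap` helper on `CompResult` (which panics only on `StillNone`) is a convenient shortcut. A minimal sketch, assuming a small `future::Cache<String, u32>` and an illustrative counter closure:

    use moka::{future::Cache, ops::compute::Op};

    #[tokio::main]
    async fn main() {
        let cache: Cache<String, u32> = Cache::new(8);

        // The closure always puts a value, so an entry is guaranteed to exist
        // and `CompResult::unwrap` cannot panic here.
        let entry = cache
            .entry_by_ref("key")
            .and_compute_with(|maybe_entry| {
                let next = maybe_entry.map(|e| e.into_value() + 1).unwrap_or(1);
                std::future::ready(Op::Put(next))
            })
            .await
            .unwrap();
        assert_eq!(entry.into_value(), 1);
    }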
/// - /// [`Entry`]: ../struct.Entry.html + /// [`Entry`]: ../struct.Entry.html /// [`Op`]: ../ops/compute/enum.Op.html - /// [`PerformedOp`]: ../ops/compute/enum.PerformedOp.html + /// [`CompResult`]: ../ops/compute/enum.CompResult.html /// [`and_upsert_with`]: #method.and_upsert_with /// [`and_try_compute_with`]: #method.and_try_compute_with /// @@ -752,8 +729,7 @@ where /// /// use moka::{ /// future::Cache, - /// ops::compute::{self, PerformedOp}, - /// Entry, + /// ops::compute::{CompResult, Op}, /// }; /// /// #[tokio::main] @@ -766,19 +742,19 @@ where /// async fn inclement_or_remove_counter( /// cache: &Cache, /// key: &str, - /// ) -> (Option>, compute::PerformedOp) { + /// ) -> CompResult { /// cache /// .entry_by_ref(key) /// .and_compute_with(|maybe_entry| { /// let op = if let Some(entry) = maybe_entry { /// let counter = entry.into_value(); /// if counter < 2 { - /// compute::Op::Put(counter.saturating_add(1)) // Update + /// Op::Put(counter.saturating_add(1)) // Update /// } else { - /// compute::Op::Remove // Remove + /// Op::Remove /// } /// } else { - /// compute::Op::Put(1) // Insert + /// Op::Put(1) // Insert /// }; /// // Return a Future that is resolved to `op` immediately. /// std::future::ready(op) @@ -788,32 +764,36 @@ where /// /// // This should insert a now counter value 1 to the cache, and return the /// // value with the kind of the operation performed. - /// let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key).await; - /// let entry = maybe_entry.expect("An entry should be returned"); + /// let result = inclement_or_remove_counter(&cache, &key).await; + /// let CompResult::Inserted(entry) = result else { + /// panic!("`Inserted` should be returned: {result:?}"); + /// }; /// assert_eq!(entry.into_value(), 1); - /// assert_eq!(performed_op, PerformedOp::Inserted); /// /// // This should increment the cached counter value by 1. - /// let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key).await; - /// let entry = maybe_entry.expect("An entry should be returned"); + /// let result = inclement_or_remove_counter(&cache, &key).await; + /// let CompResult::ReplacedWith(entry) = result else { + /// panic!("`ReplacedWith` should be returned: {result:?}"); + /// }; /// assert_eq!(entry.into_value(), 2); - /// assert_eq!(performed_op, PerformedOp::Updated); /// /// // This should remove the cached counter from the cache, and returns the /// // _removed_ value. - /// let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key).await; - /// let entry = maybe_entry.expect("An entry should be returned"); + /// let result = inclement_or_remove_counter(&cache, &key).await; + /// let CompResult::Removed(entry) = result else { + /// panic!("`Removed` should be returned: {result:?}"); + /// }; /// assert_eq!(entry.into_value(), 2); - /// assert_eq!(performed_op, PerformedOp::Removed); /// /// // The key should no longer exist. /// assert!(!cache.contains_key(key)); /// /// // This should start over; insert a new counter value 1 to the cache. 
- /// let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key).await; - /// let entry = maybe_entry.expect("An entry should be returned"); + /// let result = inclement_or_remove_counter(&cache, &key).await; + /// let CompResult::Inserted(entry) = result else { + /// panic!("`Inserted` should be returned: {result:?}"); + /// }; /// assert_eq!(entry.into_value(), 1); - /// assert_eq!(performed_op, PerformedOp::Inserted); /// } /// ``` /// @@ -823,7 +803,7 @@ where /// serially. That is, `and_compute_with` calls on the same key never run /// concurrently. The calls are serialized by the order of their invocation. It /// uses a key-level lock to achieve this. - pub async fn and_compute_with(self, f: F) -> (Option>, compute::PerformedOp) + pub async fn and_compute_with(self, f: F) -> compute::CompResult where F: FnOnce(Option>) -> Fut, Fut: Future>, @@ -850,23 +830,16 @@ where /// - `Ok(Op::Put(V))`: Put the new value `V` to the cache. /// - `Ok(Op::Remove)`: Remove the current cached entry. /// - `Ok(Op::Nop)`: Do nothing. - /// 5. Return a `Ok((Option, ops::compute::PerformedOp))` as the - /// followings: - /// - /// | [`Op`] | `Entry` to return | [`PerformedOp`] | - /// |:--------- |:--------------------------- |:----------------------- | - /// | `Put(V)` | The inserted/updated entry | `Inserted` or `Updated` | - /// | `Remove` | The _removed_ entry | `Removed` | - /// | `Nop` | The current entry or `None` | `Nop` | + /// 5. Return an `Ok(ops::compute::CompResult)` as the followings: /// - /// **Notes:** - /// - /// - `Ok(Op::Put(V))`: `PerformedOp::Updated` is returned when the key already - /// existed in the cache. It is _not_ related to whether the value was actually - /// updated or not. It can be replaced with the same value. - /// - `Ok(Op::Remove)`: Unlike other ops, the _removed_ entry is returned. If you - /// mix `Remove` with other ops, ensure to check whether the performed op is - /// `Removed` or not. + /// | [`Op`] | [`Entry`] already exists? | [`CompResult`] | Notes | + /// |:--------- |:--- |:--------------------------- |:------------------------------- | + /// | `Put(V)` | no | `Inserted(Entry)` | The new entry is returned. | + /// | `Put(V)` | yes | `ReplacedWith(Entry)` | The new entry is returned. | + /// | `Remove` | no | `StillNone(Arc)` | | + /// | `Remove` | yes | `Removed(Entry)` | The removed entry is returned. | + /// | `Nop` | no | `StillNone(Arc)` | | + /// | `Nop` | yes | `Unchanged(Entry)` | The existing entry is returned. | /// /// # Similar Methods /// @@ -874,9 +847,9 @@ where /// the [`and_compute_with`] method. /// - If you only want to put, use the [`and_upsert_with`] method. /// - /// [`Entry`]: ../struct.Entry.html + /// [`Entry`]: ../struct.Entry.html /// [`Op`]: ../ops/compute/enum.Op.html - /// [`PerformedOp`]: ../ops/compute/enum.PerformedOp.html + /// [`CompResult`]: ../ops/compute/enum.CompResult.html /// [`and_upsert_with`]: #method.and_upsert_with /// [`and_compute_with`]: #method.and_compute_with /// @@ -893,10 +866,7 @@ where /// serially. That is, `and_try_compute_with` calls on the same key never run /// concurrently. The calls are serialized by the order of their invocation. It /// uses a key-level lock to achieve this. - pub async fn and_try_compute_with( - self, - f: F, - ) -> Result<(Option>, compute::PerformedOp), E> + pub async fn and_try_compute_with(self, f: F) -> Result, E> where F: FnOnce(Option>) -> Fut, Fut: Future, E>>, @@ -962,7 +932,7 @@ where /// }) /// .await; /// // It was not an update. 
- /// assert!(!entry.is_updated()); + /// assert!(!entry.is_old_value_replaced()); /// assert_eq!(entry.key(), &key); /// assert_eq!(entry.into_value(), 1); /// @@ -978,16 +948,12 @@ where /// }) /// .await; /// // It was an update. - /// assert!(entry.is_updated()); + /// assert!(entry.is_old_value_replaced()); /// assert_eq!(entry.key(), &key); /// assert_eq!(entry.into_value(), 2); /// } /// ``` /// - /// Note: The `is_updated` method of the `Entry` returns `true` when the key - /// already existed in the cache. It is not related to whether the value was - /// actually updated or not. It can be replaced with the same value. - /// /// # Concurrent calls on the same key /// /// This method guarantees that concurrent calls on the same key are executed diff --git a/src/future/value_initializer.rs b/src/future/value_initializer.rs index bd04fc39..831f29b0 100644 --- a/src/future/value_initializer.rs +++ b/src/future/value_initializer.rs @@ -11,7 +11,10 @@ use std::{ }; use triomphe::Arc as TrioArc; -use crate::{ops::compute, Entry}; +use crate::{ + ops::compute::{CompResult, Op}, + Entry, +}; use super::{ComputeNone, OptionallyNone}; @@ -52,14 +55,6 @@ pub(crate) enum InitResult { InitErr(Arc), } -pub(crate) enum ComputeResult { - Inserted(V), - Updated(V), - Removed(V), - Nop(Option), - EvalErr(E), -} - enum WaiterValue { Computing, Ready(Result), @@ -295,13 +290,13 @@ where /// Panics if the `init` future has been panicked. pub(crate) async fn try_compute<'a, C, F, Fut, O, E>( &'a self, - c_key: &Arc, + c_key: Arc, c_hash: u64, cache: &C, f: F, - post_init: fn(O) -> Result, E>, + post_init: fn(O) -> Result, E>, allow_nop: bool, - ) -> ComputeResult + ) -> Result, E> where C: GetOrInsert + Send + 'a, F: FnOnce(Option>) -> Fut, @@ -309,10 +304,9 @@ where E: Send + Sync + 'static, { use std::panic::{resume_unwind, AssertUnwindSafe}; - use ComputeResult::{EvalErr, Inserted, Nop, Removed, Updated}; let type_id = TypeId::of::(); - let (w_key, w_hash) = waiter_key_hash(&self.waiters, c_key, type_id); + let (w_key, w_hash) = waiter_key_hash(&self.waiters, &c_key, type_id); let waiter = TrioArc::new(RwLock::new(WaiterValue::Computing)); // NOTE: We have to acquire a write lock before `try_insert_waiter`, // so that any concurrent attempt will get our lock and wait on it. @@ -350,7 +344,7 @@ where let waiter_guard = WaiterGuard::new(w_key, w_hash, &self.waiters, lock); // Get the current value. - let maybe_entry = cache.get_entry(c_key, c_hash).await; + let maybe_entry = cache.get_entry(&c_key, c_hash).await; let maybe_value = if allow_nop { maybe_entry.as_ref().map(|ent| ent.value().clone()) } else { @@ -372,39 +366,57 @@ where // Resolve the `fut` future. Catching panic is safe here as we will not // resolve the future again. - match AssertUnwindSafe(fut).catch_unwind().await { + let output = match AssertUnwindSafe(fut).catch_unwind().await { // Resolved. - Ok(op) => { + Ok(output) => { waiter_guard.set_waiter_value(WaiterValue::ReadyNone); - match post_init(op) { - Ok(op) => match op { - compute::Op::Nop => Nop(maybe_value), - compute::Op::Put(value) => { - cache.insert(Arc::clone(c_key), c_hash, value.clone()).await; - if entry_existed { - Updated(value) - } else { - Inserted(value) - } - } - compute::Op::Remove => { - let maybe_prev_v = cache.remove(c_key, c_hash).await; - if let Some(prev_v) = maybe_prev_v { - Removed(prev_v) - } else { - Nop(None) - } - } - }, - Err(e) => EvalErr(e), - } + output } // Panicked. 
Err(payload) => { waiter_guard.set_waiter_value(WaiterValue::InitFuturePanicked); resume_unwind(payload); } + }; + + match post_init(output)? { + Op::Nop => { + if let Some(value) = maybe_value { + Ok(CompResult::Unchanged(Entry::new( + Some(c_key), + value, + false, + false, + ))) + } else { + Ok(CompResult::StillNone(c_key)) + } + } + Op::Put(value) => { + cache + .insert(Arc::clone(&c_key), c_hash, value.clone()) + .await; + if entry_existed { + crossbeam_epoch::pin().flush(); + let entry = Entry::new(Some(c_key), value, true, true); + Ok(CompResult::ReplacedWith(entry)) + } else { + let entry = Entry::new(Some(c_key), value, true, false); + Ok(CompResult::Inserted(entry)) + } + } + Op::Remove => { + let maybe_prev_v = cache.remove(&c_key, c_hash).await; + if let Some(prev_v) = maybe_prev_v { + let entry = Entry::new(Some(c_key), prev_v, false, false); + crossbeam_epoch::pin().flush(); + Ok(CompResult::Removed(entry)) + } else { + Ok(CompResult::StillNone(c_key)) + } + } } + // The lock will be unlocked here. } @@ -430,19 +442,17 @@ where } /// The `post_init` function for the `and_upsert_with` method of cache. - pub(crate) fn post_init_for_upsert_with(value: V) -> Result, ()> { - Ok(compute::Op::Put(value)) + pub(crate) fn post_init_for_upsert_with(value: V) -> Result, ()> { + Ok(Op::Put(value)) } /// The `post_init` function for the `and_compute_with` method of cache. - pub(crate) fn post_init_for_compute_with(op: compute::Op) -> Result, ()> { + pub(crate) fn post_init_for_compute_with(op: Op) -> Result, ()> { Ok(op) } /// The `post_init` function for the `and_try_compute_with` method of cache. - pub(crate) fn post_init_for_try_compute_with( - op: Result, E>, - ) -> Result, E> + pub(crate) fn post_init_for_try_compute_with(op: Result, E>) -> Result, E> where E: Send + Sync + 'static, { diff --git a/src/ops.rs b/src/ops.rs index a1f967d2..11598330 100644 --- a/src/ops.rs +++ b/src/ops.rs @@ -2,6 +2,9 @@ /// Operations used by the `and_compute_with` and similar methods. pub mod compute { + use std::sync::Arc; + + use crate::Entry; /// Instructs the `and_compute_with` and similar methods how to modify the cached /// entry. @@ -15,24 +18,54 @@ pub mod compute { Remove, } - /// Will be returned by the `and_compute_with` and similar methods to indicate - /// what kind of operation was performed. - #[derive(Debug, Clone, Copy, PartialEq, Eq)] - pub enum PerformedOp { - /// The entry did not exist, or already existed but was not modified. - Nop, - /// The entry did not exist and was inserted. - Inserted, - /// The entry already existed and its value may have been updated. + /// The result of the `and_compute_with` and similar methods. + #[derive(Debug)] + pub enum CompResult { + /// The entry did not exist and still does not exist. + StillNone(Arc), + /// The entry already existed and was not modified. The returned entry + /// contains the existing value. + Unchanged(Entry), + /// The entry did not exist and was inserted. The returned entry contains + /// the inserted value. + Inserted(Entry), + /// The entry already existed and its value was replaced with a new one. The + /// returned entry contains the new value (not the replaced value). + ReplacedWith(Entry), + /// The entry already existed and was removed. The returned entry contains + /// the removed value. /// - /// Note: `Updated` is returned if `Op::Put` was requested and the entry - /// already existed. It is _not_ related to whether the value was actually - /// updated or not. 
- Updated, - /// The entry already existed and was removed. - /// - /// Note: `Nop` is returned instead of `Removed` if `Op::Remove` was + /// Note: `StillNone` is returned instead of `Removed` if `Op::Remove` was /// requested but the entry did not exist. - Removed, + Removed(Entry), + } + + impl CompResult { + /// Returns the contained `Some(Entry)` if any. Otherwise returns `None`. + /// Consumes the `self` value. + pub fn into_entry(self) -> Option> { + match self { + CompResult::StillNone(_) => None, + CompResult::Unchanged(entry) => Some(entry), + CompResult::Inserted(entry) => Some(entry), + CompResult::ReplacedWith(entry) => Some(entry), + CompResult::Removed(entry) => Some(entry), + } + } + + /// Unwraps the contained `Entry`, consuming the `self` value. + /// + /// # Panics + /// + /// Panics if the `self` value is `StillNone`. + pub fn unwrap(self) -> Entry { + match self { + CompResult::StillNone(_) => panic!("`CompResult::unwrap` called on `StillNone`"), + CompResult::Unchanged(entry) => entry, + CompResult::Inserted(entry) => entry, + CompResult::ReplacedWith(entry) => entry, + CompResult::Removed(entry) => entry, + } + } } } diff --git a/src/sync/cache.rs b/src/sync/cache.rs index 2ba521ab..b8f430ad 100644 --- a/src/sync/cache.rs +++ b/src/sync/cache.rs @@ -1,5 +1,5 @@ use super::{ - value_initializer::{ComputeResult, GetOrInsert, InitResult, ValueInitializer}, + value_initializer::{GetOrInsert, InitResult, ValueInitializer}, CacheBuilder, OwnedKeyEntrySelector, RefKeyEntrySelector, }; use crate::{ @@ -10,7 +10,7 @@ use crate::{ time::Instant, }, notification::EvictionListener, - ops::compute, + ops::compute::{self, CompResult}, policy::ExpirationPolicy, sync::{Iter, PredicateId}, sync_base::{ @@ -1508,39 +1508,17 @@ where key: Arc, hash: u64, f: F, - ) -> (Option>, compute::PerformedOp) + ) -> compute::CompResult where F: FnOnce(Option>) -> compute::Op, { let post_init = ValueInitializer::::post_init_for_compute_with; - match self .value_initializer - .try_compute(&key, hash, self, f, post_init, true) + .try_compute(key, hash, self, f, post_init, true) { - ComputeResult::Nop(maybe_value) => { - let maybe_entry = - maybe_value.map(|value| Entry::new(Some(key), value, false, false)); - (maybe_entry, compute::PerformedOp::Nop) - } - ComputeResult::Inserted(value) => { - crossbeam_epoch::pin().flush(); - let entry = Entry::new(Some(key), value, true, false); - (Some(entry), compute::PerformedOp::Inserted) - } - ComputeResult::Updated(value) => { - crossbeam_epoch::pin().flush(); - let entry = Entry::new(Some(key), value, true, true); - (Some(entry), compute::PerformedOp::Updated) - } - ComputeResult::Removed(value) => { - crossbeam_epoch::pin().flush(); - let entry = Entry::new(Some(key), value, false, false); - (Some(entry), compute::PerformedOp::Removed) - } - ComputeResult::EvalErr(_) => { - unreachable!() - } + Ok(result) => result, + Err(_) => unreachable!(), } } @@ -1549,39 +1527,14 @@ where key: Arc, hash: u64, f: F, - ) -> Result<(Option>, compute::PerformedOp), E> + ) -> Result, E> where F: FnOnce(Option>) -> Result, E>, E: Send + Sync + 'static, { let post_init = ValueInitializer::::post_init_for_try_compute_with; - - match self - .value_initializer - .try_compute(&key, hash, self, f, post_init, true) - { - ComputeResult::Nop(maybe_value) => { - let maybe_entry = - maybe_value.map(|value| Entry::new(Some(key), value, false, false)); - Ok((maybe_entry, compute::PerformedOp::Nop)) - } - ComputeResult::Inserted(value) => { - crossbeam_epoch::pin().flush(); - let entry = 
Entry::new(Some(key), value, true, false); - Ok((Some(entry), compute::PerformedOp::Inserted)) - } - ComputeResult::Updated(value) => { - crossbeam_epoch::pin().flush(); - let entry = Entry::new(Some(key), value, true, true); - Ok((Some(entry), compute::PerformedOp::Updated)) - } - ComputeResult::Removed(value) => { - crossbeam_epoch::pin().flush(); - let entry = Entry::new(Some(key), value, false, false); - Ok((Some(entry), compute::PerformedOp::Removed)) - } - ComputeResult::EvalErr(e) => Err(e), - } + self.value_initializer + .try_compute(key, hash, self, f, post_init, true) } pub(crate) fn upsert_with_hash_and_fun(&self, key: Arc, hash: u64, f: F) -> Entry @@ -1589,22 +1542,12 @@ where F: FnOnce(Option>) -> V, { let post_init = ValueInitializer::::post_init_for_upsert_with; - match self .value_initializer - .try_compute(&key, hash, self, f, post_init, false) + .try_compute(key, hash, self, f, post_init, false) { - ComputeResult::Inserted(value) => { - crossbeam_epoch::pin().flush(); - Entry::new(Some(key), value, true, false) - } - ComputeResult::Updated(value) => { - crossbeam_epoch::pin().flush(); - Entry::new(Some(key), value, true, true) - } - ComputeResult::Nop(_) | ComputeResult::Removed(_) | ComputeResult::EvalErr(_) => { - unreachable!() - } + Ok(CompResult::Inserted(entry) | CompResult::ReplacedWith(entry)) => entry, + _ => unreachable!(), } } @@ -4203,60 +4146,45 @@ mod tests { }) }; - let (ent1, op1) = thread1.join().expect("Thread 1 should finish"); - let (ent2, op2) = thread2.join().expect("Thread 2 should finish"); - let (ent3, op3) = thread3.join().expect("Thread 3 should finish"); - let (ent4, op4) = thread4.join().expect("Thread 4 should finish"); - let (ent5, op5) = thread5.join().expect("Thread 5 should finish"); - let (ent6, op6) = thread6.join().expect("Thread 6 should finish"); - assert_eq!(op1, compute::PerformedOp::Inserted); - assert_eq!(op2, compute::PerformedOp::Updated); - assert_eq!(op3, compute::PerformedOp::Removed); - assert_eq!(op4, compute::PerformedOp::Nop); - assert_eq!(op5, compute::PerformedOp::Inserted); - assert_eq!(op6, compute::PerformedOp::Nop); + let res1 = thread1.join().expect("Thread 1 should finish"); + let res2 = thread2.join().expect("Thread 2 should finish"); + let res3 = thread3.join().expect("Thread 3 should finish"); + let res4 = thread4.join().expect("Thread 4 should finish"); + let res5 = thread5.join().expect("Thread 5 should finish"); + let res6 = thread6.join().expect("Thread 6 should finish"); + let compute::CompResult::Inserted(entry) = res1 else { + panic!("Expected `Inserted`. Got {res1:?}") + }; assert_eq!( - *ent1 - .expect("should have entry") - .into_value() - .read() - .unwrap(), + *entry.into_value().read().unwrap(), vec![1, 2] // The same Vec was modified by task2. ); - assert_eq!( - *ent2 - .expect("should have entry") - .into_value() - .read() - .unwrap(), - vec![1, 2] - ); - assert_eq!( - *ent3 - .expect("should have entry") - .into_value() - .read() - .unwrap(), - vec![1, 2] // Removed value - ); - assert!(ent4.is_none(),); - assert_eq!( - *ent5 - .expect("should have entry") - .into_value() - .read() - .unwrap(), - vec![5] - ); - assert_eq!( - *ent6 - .expect("should have entry") - .into_value() - .read() - .unwrap(), - vec![5] - ); + + let compute::CompResult::ReplacedWith(entry) = res2 else { + panic!("Expected `ReplacedWith`. Got {res2:?}") + }; + assert_eq!(*entry.into_value().read().unwrap(), vec![1, 2]); + + let compute::CompResult::Removed(entry) = res3 else { + panic!("Expected `Removed`. 
Got {res3:?}") + }; + assert_eq!(*entry.into_value().read().unwrap(), vec![1, 2]); + + let compute::CompResult::StillNone(key) = res4 else { + panic!("Expected `StillNone`. Got {res4:?}") + }; + assert_eq!(*key, KEY); + + let compute::CompResult::Inserted(entry) = res5 else { + panic!("Expected `Inserted`. Got {res5:?}") + }; + assert_eq!(*entry.into_value().read().unwrap(), vec![5]); + + let compute::CompResult::Unchanged(entry) = res6 else { + panic!("Expected `Unchanged`. Got {res6:?}") + }; + assert_eq!(*entry.into_value().read().unwrap(), vec![5]); } #[test] @@ -4352,43 +4280,26 @@ mod tests { let res3 = thread3.join().expect("Thread 3 should finish"); let res4 = thread4.join().expect("Thread 4 should finish"); - let Ok((ent1, op1)) = res1 else { - panic!("res1 should be an Ok") - }; - let Ok((ent2, op2)) = res2 else { - panic!("res2 should be an Ok") + let Ok(compute::CompResult::Inserted(entry)) = res1 else { + panic!("Expected `Inserted`. Got {res1:?}") }; - assert!(res3.is_err()); - let Ok((ent4, op4)) = res4 else { - panic!("res4 should be an Ok") - }; - - assert_eq!(op1, compute::PerformedOp::Inserted); - assert_eq!(op2, compute::PerformedOp::Updated); - assert_eq!(op4, compute::PerformedOp::Removed); - assert_eq!( - *ent1 - .expect("should have entry") - .into_value() - .read() - .unwrap(), + *entry.into_value().read().unwrap(), vec![1, 2] // The same Vec was modified by task2. ); + + let Ok(compute::CompResult::ReplacedWith(entry)) = res2 else { + panic!("Expected `ReplacedWith`. Got {res2:?}") + }; + assert_eq!(*entry.into_value().read().unwrap(), vec![1, 2]); + + assert!(res3.is_err()); + + let Ok(compute::CompResult::Removed(entry)) = res4 else { + panic!("Expected `Removed`. Got {res4:?}") + }; assert_eq!( - *ent2 - .expect("should have entry") - .into_value() - .read() - .unwrap(), - vec![1, 2] - ); - assert_eq!( - *ent4 - .expect("should have entry") - .into_value() - .read() - .unwrap(), + *entry.into_value().read().unwrap(), vec![1, 2] // Removed value. ); } diff --git a/src/sync/entry_selector.rs b/src/sync/entry_selector.rs index d94ff419..5e66cb99 100644 --- a/src/sync/entry_selector.rs +++ b/src/sync/entry_selector.rs @@ -52,22 +52,16 @@ where /// - `Op::Put(V)`: Put the new value `V` to the cache. /// - `Op::Remove`: Remove the current cached entry. /// - `Op::Nop`: Do nothing. - /// 3. Return an `(Option, ops::compute::PerformedOp)` as the followings: + /// 3. Return an `ops::compute::CompResult` as the followings: /// - /// | [`Op`] | `Entry` to return | [`PerformedOp`] | - /// |:--------- |:--------------------------- |:----------------------- | - /// | `Put(V)` | The inserted/updated entry | `Inserted` or `Updated` | - /// | `Remove` | The _removed_ entry | `Removed` | - /// | `Nop` | The current entry or `None` | `Nop` | - /// - /// **Notes:** - /// - /// - `Op::Put(V)`: `PerformedOp::Updated` is returned when the key already - /// existed in the cache. It is _not_ related to whether the value was actually - /// updated or not. It can be replaced with the same value. - /// - `Op::Remove`: Unlike other ops, the _removed_ entry is returned. If you mix - /// `Remove` with other ops, ensure to check whether the performed op is - /// `Removed` or not. + /// | [`Op`] | [`Entry`] already exists? | [`CompResult`] | Notes | + /// |:--------- |:--- |:--------------------------- |:------------------------------- | + /// | `Put(V)` | no | `Inserted(Entry)` | The new entry is returned. | + /// | `Put(V)` | yes | `ReplacedWith(Entry)` | The new entry is returned. 
| + /// | `Remove` | no | `StillNone(Arc)` | | + /// | `Remove` | yes | `Removed(Entry)` | The removed entry is returned. | + /// | `Nop` | no | `StillNone(Arc)` | | + /// | `Nop` | yes | `Unchanged(Entry)` | The existing entry is returned. | /// /// # Similar Methods /// @@ -76,9 +70,9 @@ where /// method. /// - If you only want to put, use the [`and_upsert_with`] method. /// - /// [`Entry`]: ../struct.Entry.html + /// [`Entry`]: ../struct.Entry.html /// [`Op`]: ../ops/compute/enum.Op.html - /// [`PerformedOp`]: ../ops/compute/enum.PerformedOp.html + /// [`CompResult`]: ../ops/compute/enum.CompResult.html /// [`and_upsert_with`]: #method.and_upsert_with /// [`and_try_compute_with`]: #method.and_try_compute_with /// @@ -87,8 +81,7 @@ where /// ```rust /// use moka::{ /// sync::Cache, - /// ops::compute::{self, PerformedOp}, - /// Entry, + /// ops::compute::{CompResult, Op}, /// }; /// /// let cache: Cache = Cache::new(100); @@ -99,51 +92,55 @@ where /// fn inclement_or_remove_counter( /// cache: &Cache, /// key: &str, - /// ) -> (Option>, compute::PerformedOp) { + /// ) -> CompResult { /// cache /// .entry(key.to_string()) /// .and_compute_with(|maybe_entry| { /// if let Some(entry) = maybe_entry { /// let counter = entry.into_value(); /// if counter < 2 { - /// compute::Op::Put(counter.saturating_add(1)) // Update + /// Op::Put(counter.saturating_add(1)) // Update /// } else { - /// compute::Op::Remove // Remove + /// Op::Remove /// } /// } else { - /// compute::Op::Put(1) // Insert + /// Op::Put(1) // Insert /// } /// }) /// } /// - /// // This should insert a now counter value 1 to the cache, and return the + /// // This should insert a new counter value 1 to the cache, and return the /// // value with the kind of the operation performed. - /// let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key); - /// let entry = maybe_entry.expect("An entry should be returned"); + /// let result = inclement_or_remove_counter(&cache, &key); + /// let CompResult::Inserted(entry) = result else { + /// panic!("`Inserted` should be returned: {result:?}"); + /// }; /// assert_eq!(entry.into_value(), 1); - /// assert_eq!(performed_op, PerformedOp::Inserted); /// /// // This should increment the cached counter value by 1. - /// let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key); - /// let entry = maybe_entry.expect("An entry should be returned"); + /// let result = inclement_or_remove_counter(&cache, &key); + /// let CompResult::ReplacedWith(entry) = result else { + /// panic!("`ReplacedWith` should be returned: {result:?}"); + /// }; /// assert_eq!(entry.into_value(), 2); - /// assert_eq!(performed_op, PerformedOp::Updated); /// /// // This should remove the cached counter from the cache, and returns the /// // _removed_ value. - /// let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key); - /// let entry = maybe_entry.expect("An entry should be returned"); + /// let result = inclement_or_remove_counter(&cache, &key); + /// let CompResult::Removed(entry) = result else { + /// panic!("`Removed` should be returned: {result:?}"); + /// }; /// assert_eq!(entry.into_value(), 2); - /// assert_eq!(performed_op, PerformedOp::Removed); /// /// // The key should no longer exist. /// assert!(!cache.contains_key(&key)); /// /// // This should start over; insert a new counter value 1 to the cache. 
- /// let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key); - /// let entry = maybe_entry.expect("An entry should be returned"); + /// let result = inclement_or_remove_counter(&cache, &key); + /// let CompResult::Inserted(entry) = result else { + /// panic!("`Inserted` should be returned: {result:?}"); + /// }; /// assert_eq!(entry.into_value(), 1); - /// assert_eq!(performed_op, PerformedOp::Inserted); /// ``` /// /// # Concurrent calls on the same key @@ -152,7 +149,7 @@ where /// serially. That is, `and_compute_with` calls on the same key never run /// concurrently. The calls are serialized by the order of their invocation. It /// uses a key-level lock to achieve this. - pub fn and_compute_with(self, f: F) -> (Option>, compute::PerformedOp) + pub fn and_compute_with(self, f: F) -> compute::CompResult where F: FnOnce(Option>) -> compute::Op, { @@ -175,23 +172,16 @@ where /// - `Ok(Op::Put(V))`: Put the new value `V` to the cache. /// - `Ok(Op::Remove)`: Remove the current cached entry. /// - `Ok(Op::Nop)`: Do nothing. - /// 4. Return a `Ok((Option, ops::compute::PerformedOp))` as the - /// followings: - /// - /// | [`Op`] | `Entry` to return | [`PerformedOp`] | - /// |:--------- |:--------------------------- |:----------------------- | - /// | `Put(V)` | The inserted/updated entry | `Inserted` or `Updated` | - /// | `Remove` | The _removed_ entry | `Removed` | - /// | `Nop` | The current entry or `None` | `Nop` | - /// - /// **Notes:** + /// 4. Return an `Ok(ops::compute::CompResult)` as the followings: /// - /// - `Ok(Op::Put(V))`: `PerformedOp::Updated` is returned when the key already - /// existed in the cache. It is _not_ related to whether the value was actually - /// updated or not. It can be replaced with the same value. - /// - `Ok(Op::Remove)`: Unlike other ops, the _removed_ entry is returned. If you - /// mix `Remove` with other ops, ensure to check whether the performed op is - /// `Removed` or not. + /// | [`Op`] | [`Entry`] already exists? | [`CompResult`] | Notes | + /// |:--------- |:--- |:--------------------------- |:------------------------------- | + /// | `Put(V)` | no | `Inserted(Entry)` | The new entry is returned. | + /// | `Put(V)` | yes | `ReplacedWith(Entry)` | The new entry is returned. | + /// | `Remove` | no | `StillNone(Arc)` | | + /// | `Remove` | yes | `Removed(Entry)` | The removed entry is returned. | + /// | `Nop` | no | `StillNone(Arc)` | | + /// | `Nop` | yes | `Unchanged(Entry)` | The existing entry is returned. | /// /// # Similar Methods /// @@ -199,9 +189,9 @@ where /// the [`and_compute_with`] method. /// - If you only want to put, use the [`and_upsert_with`] method. /// - /// [`Entry`]: ../struct.Entry.html + /// [`Entry`]: ../struct.Entry.html /// [`Op`]: ../ops/compute/enum.Op.html - /// [`PerformedOp`]: ../ops/compute/enum.PerformedOp.html + /// [`CompResult`]: ../ops/compute/enum.CompResult.html /// [`and_upsert_with`]: #method.and_upsert_with /// [`and_compute_with`]: #method.and_compute_with /// @@ -218,10 +208,7 @@ where /// serially. That is, `and_try_compute_with` calls on the same key never run /// concurrently. The calls are serialized by the order of their invocation. It /// uses a key-level lock to achieve this. - pub fn and_try_compute_with( - self, - f: F, - ) -> Result<(Option>, compute::PerformedOp), E> + pub fn and_try_compute_with(self, f: F) -> Result, E> where F: FnOnce(Option>) -> Result, E>, E: Send + Sync + 'static, @@ -273,7 +260,7 @@ where /// } /// }); /// // It was not an update. 
- /// assert!(!entry.is_updated()); + /// assert!(!entry.is_old_value_replaced()); /// assert_eq!(entry.key(), &key); /// assert_eq!(entry.into_value(), 1); /// @@ -287,15 +274,11 @@ where /// } /// }); /// // It was an update. - /// assert!(entry.is_updated()); + /// assert!(entry.is_old_value_replaced()); /// assert_eq!(entry.key(), &key); /// assert_eq!(entry.into_value(), 2); /// ``` /// - /// Note: The `is_updated` method of the `Entry` returns `true` when the key - /// already existed in the cache. It is not related to whether the value was - /// actually updated or not. It can be replaced with the same value. - /// /// # Concurrent calls on the same key /// /// This method guarantees that concurrent calls on the same key are executed @@ -611,22 +594,16 @@ where /// - `Op::Put(V)`: Put the new value `V` to the cache. /// - `Op::Remove`: Remove the current cached entry. /// - `Op::Nop`: Do nothing. - /// 3. Return an `(Option, ops::compute::PerformedOp)` as the followings: + /// 3. Return an `ops::compute::CompResult` as the followings: /// - /// | [`Op`] | `Entry` to return | [`PerformedOp`] | - /// |:--------- |:--------------------------- |:----------------------- | - /// | `Put(V)` | The inserted/updated entry | `Inserted` or `Updated` | - /// | `Remove` | The _removed_ entry | `Removed` | - /// | `Nop` | The current entry or `None` | `Nop` | - /// - /// **Notes:** - /// - /// - `Op::Put(V)`: `PerformedOp::Updated` is returned when the key already - /// existed in the cache. It is _not_ related to whether the value was actually - /// updated or not. It can be replaced with the same value. - /// - `Op::Remove`: Unlike other ops, the _removed_ entry is returned. If you mix - /// `Remove` with other ops, ensure to check whether the performed op is - /// `Removed` or not. + /// | [`Op`] | [`Entry`] already exists? | [`CompResult`] | Notes | + /// |:--------- |:--- |:--------------------------- |:------------------------------- | + /// | `Put(V)` | no | `Inserted(Entry)` | The new entry is returned. | + /// | `Put(V)` | yes | `ReplacedWith(Entry)` | The new entry is returned. | + /// | `Remove` | no | `StillNone(Arc)` | | + /// | `Remove` | yes | `Removed(Entry)` | The removed entry is returned. | + /// | `Nop` | no | `StillNone(Arc)` | | + /// | `Nop` | yes | `Unchanged(Entry)` | The existing entry is returned. | /// /// # Similar Methods /// @@ -635,9 +612,9 @@ where /// method. /// - If you only want to put, use the [`and_upsert_with`] method. 
/// - /// [`Entry`]: ../struct.Entry.html + /// [`Entry`]: ../struct.Entry.html /// [`Op`]: ../ops/compute/enum.Op.html - /// [`PerformedOp`]: ../ops/compute/enum.PerformedOp.html + /// [`CompResult`]: ../ops/compute/enum.CompResult.html /// [`and_upsert_with`]: #method.and_upsert_with /// [`and_try_compute_with`]: #method.and_try_compute_with /// @@ -646,8 +623,7 @@ where /// ```rust /// use moka::{ /// sync::Cache, - /// ops::compute::{self, PerformedOp}, - /// Entry, + /// ops::compute::{CompResult, Op}, /// }; /// /// let cache: Cache = Cache::new(100); @@ -658,51 +634,55 @@ where /// fn inclement_or_remove_counter( /// cache: &Cache, /// key: &str, - /// ) -> (Option>, compute::PerformedOp) { + /// ) -> CompResult { /// cache /// .entry_by_ref(key) /// .and_compute_with(|maybe_entry| { /// if let Some(entry) = maybe_entry { /// let counter = entry.into_value(); /// if counter < 2 { - /// compute::Op::Put(counter.saturating_add(1)) // Update + /// Op::Put(counter.saturating_add(1)) // Update /// } else { - /// compute::Op::Remove // Remove + /// Op::Remove /// } /// } else { - /// compute::Op::Put(1) // Insert + /// Op::Put(1) // Insert /// } /// }) /// } /// /// // This should insert a now counter value 1 to the cache, and return the /// // value with the kind of the operation performed. - /// let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key); - /// let entry = maybe_entry.expect("An entry should be returned"); + /// let result = inclement_or_remove_counter(&cache, &key); + /// let CompResult::Inserted(entry) = result else { + /// panic!("`Inserted` should be returned: {result:?}"); + /// }; /// assert_eq!(entry.into_value(), 1); - /// assert_eq!(performed_op, PerformedOp::Inserted); /// /// // This should increment the cached counter value by 1. - /// let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key); - /// let entry = maybe_entry.expect("An entry should be returned"); + /// let result = inclement_or_remove_counter(&cache, &key); + /// let CompResult::ReplacedWith(entry) = result else { + /// panic!("`ReplacedWith` should be returned: {result:?}"); + /// }; /// assert_eq!(entry.into_value(), 2); - /// assert_eq!(performed_op, PerformedOp::Updated); /// /// // This should remove the cached counter from the cache, and returns the /// // _removed_ value. - /// let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key); - /// let entry = maybe_entry.expect("An entry should be returned"); + /// let result = inclement_or_remove_counter(&cache, &key); + /// let CompResult::Removed(entry) = result else { + /// panic!("`Removed` should be returned: {result:?}"); + /// }; /// assert_eq!(entry.into_value(), 2); - /// assert_eq!(performed_op, PerformedOp::Removed); /// /// // The key should no longer exist. /// assert!(!cache.contains_key(&key)); /// /// // This should start over; insert a new counter value 1 to the cache. - /// let (maybe_entry, performed_op) = inclement_or_remove_counter(&cache, &key); - /// let entry = maybe_entry.expect("An entry should be returned"); + /// let result = inclement_or_remove_counter(&cache, &key); + /// let CompResult::Inserted(entry) = result else { + /// panic!("`Inserted` should be returned: {result:?}"); + /// }; /// assert_eq!(entry.into_value(), 1); - /// assert_eq!(performed_op, PerformedOp::Inserted); /// ``` /// /// # Concurrent calls on the same key @@ -711,7 +691,7 @@ where /// serially. That is, `and_compute_with` calls on the same key never run /// concurrently. 
The calls are serialized by the order of their invocation. It /// uses a key-level lock to achieve this. - pub fn and_compute_with(self, f: F) -> (Option>, compute::PerformedOp) + pub fn and_compute_with(self, f: F) -> compute::CompResult where F: FnOnce(Option>) -> compute::Op, { @@ -734,23 +714,16 @@ where /// - `Ok(Op::Put(V))`: Put the new value `V` to the cache. /// - `Ok(Op::Remove)`: Remove the current cached entry. /// - `Ok(Op::Nop)`: Do nothing. - /// 4. Return a `Ok((Option, ops::compute::PerformedOp))` as the - /// followings: - /// - /// | [`Op`] | `Entry` to return | [`PerformedOp`] | - /// |:--------- |:--------------------------- |:----------------------- | - /// | `Put(V)` | The inserted/updated entry | `Inserted` or `Updated` | - /// | `Remove` | The _removed_ entry | `Removed` | - /// | `Nop` | The current entry or `None` | `Nop` | - /// - /// **Notes:** + /// 4. Return an `Ok(ops::compute::CompResult)` as the followings: /// - /// - `Ok(Op::Put(V))`: `PerformedOp::Updated` is returned when the key already - /// existed in the cache. It is _not_ related to whether the value was actually - /// updated or not. It can be replaced with the same value. - /// - `Ok(Op::Remove)`: Unlike other ops, the _removed_ entry is returned. If you - /// mix `Remove` with other ops, ensure to check whether the performed op is - /// `Removed` or not. + /// | [`Op`] | [`Entry`] already exists? | [`CompResult`] | Notes | + /// |:--------- |:--- |:--------------------------- |:------------------------------- | + /// | `Put(V)` | no | `Inserted(Entry)` | The new entry is returned. | + /// | `Put(V)` | yes | `ReplacedWith(Entry)` | The new entry is returned. | + /// | `Remove` | no | `StillNone(Arc)` | | + /// | `Remove` | yes | `Removed(Entry)` | The removed entry is returned. | + /// | `Nop` | no | `StillNone(Arc)` | | + /// | `Nop` | yes | `Unchanged(Entry)` | The existing entry is returned. | /// /// # Similar Methods /// @@ -758,9 +731,9 @@ where /// the [`and_compute_with`] method. /// - If you only want to put, use the [`and_upsert_with`] method. /// - /// [`Entry`]: ../struct.Entry.html + /// [`Entry`]: ../struct.Entry.html /// [`Op`]: ../ops/compute/enum.Op.html - /// [`PerformedOp`]: ../ops/compute/enum.PerformedOp.html + /// [`CompResult`]: ../ops/compute/enum.CompResult.html /// [`and_upsert_with`]: #method.and_upsert_with /// [`and_compute_with`]: #method.and_compute_with /// @@ -777,10 +750,7 @@ where /// serially. That is, `and_try_compute_with` calls on the same key never run /// concurrently. The calls are serialized by the order of their invocation. It /// uses a key-level lock to achieve this. - pub fn and_try_compute_with( - self, - f: F, - ) -> Result<(Option>, compute::PerformedOp), E> + pub fn and_try_compute_with(self, f: F) -> Result, E> where F: FnOnce(Option>) -> Result, E>, E: Send + Sync + 'static, @@ -832,7 +802,7 @@ where /// } /// }); /// // It was not an update. - /// assert!(!entry.is_updated()); + /// assert!(!entry.is_old_value_replaced()); /// assert_eq!(entry.key(), &key); /// assert_eq!(entry.into_value(), 1); /// @@ -846,15 +816,11 @@ where /// } /// }); /// // It was an update. - /// assert!(entry.is_updated()); + /// assert!(entry.is_old_value_replaced()); /// assert_eq!(entry.key(), &key); /// assert_eq!(entry.into_value(), 2); /// ``` /// - /// Note: The `is_updated` method of the `Entry` returns `true` when the key - /// already existed in the cache. It is not related to whether the value was - /// actually updated or not. 
It can be replaced with the same value. - /// /// # Concurrent calls on the same key /// /// This method guarantees that concurrent calls on the same key are executed diff --git a/src/sync/value_initializer.rs b/src/sync/value_initializer.rs index d9832ec7..7ca35d74 100644 --- a/src/sync/value_initializer.rs +++ b/src/sync/value_initializer.rs @@ -7,7 +7,10 @@ use std::{ }; use triomphe::Arc as TrioArc; -use crate::{ops::compute, Entry}; +use crate::{ + ops::compute::{CompResult, Op}, + Entry, +}; use super::{ComputeNone, OptionallyNone}; @@ -57,14 +60,6 @@ pub(crate) enum InitResult { InitErr(Arc), } -pub(crate) enum ComputeResult { - Inserted(V), - Updated(V), - Removed(V), - Nop(Option), - EvalErr(E), -} - pub(crate) struct ValueInitializer { // TypeId is the type ID of the concrete error type of generic type E in the // try_get_with method. We use the type ID as a part of the key to ensure that @@ -198,13 +193,13 @@ where /// Panics if the `init` closure has been panicked. pub(crate) fn try_compute<'a, C, F, O, E>( &'a self, - c_key: &Arc, + c_key: Arc, c_hash: u64, cache: &C, f: F, - post_init: fn(O) -> Result, E>, + post_init: fn(O) -> Result, E>, allow_nop: bool, - ) -> ComputeResult + ) -> Result, E> where V: 'static, C: GetOrInsert + Send + 'a, @@ -212,10 +207,9 @@ where E: Send + Sync + 'static, { use std::panic::{catch_unwind, resume_unwind, AssertUnwindSafe}; - use ComputeResult::{EvalErr, Inserted, Nop, Removed, Updated}; let type_id = TypeId::of::(); - let (w_key, w_hash) = self.waiter_key_hash(c_key, type_id); + let (w_key, w_hash) = self.waiter_key_hash(&c_key, type_id); let waiter = TrioArc::new(RwLock::new(WaiterValue::Computing)); // NOTE: We have to acquire a write lock before `try_insert_waiter`, // so that any concurrent attempt will get our lock and wait on it. @@ -247,7 +241,7 @@ where // Our waiter was inserted. // Get the current value. - let maybe_entry = cache.get_entry(c_key, c_hash); + let maybe_entry = cache.get_entry(&c_key, c_hash); let maybe_value = if allow_nop { maybe_entry.as_ref().map(|ent| ent.value().clone()) } else { @@ -257,34 +251,11 @@ where // Evaluate the `f` closure. Catching panic is safe here as we will not // evaluate the closure again. - match catch_unwind(AssertUnwindSafe(|| f(maybe_entry))) { + let output = match catch_unwind(AssertUnwindSafe(|| f(maybe_entry))) { // Evaluated. - Ok(op) => { - let init_res = match post_init(op) { - Ok(op) => match op { - compute::Op::Nop => Nop(maybe_value), - compute::Op::Put(value) => { - cache.insert(Arc::clone(c_key), c_hash, value.clone()); - if entry_existed { - Updated(value) - } else { - Inserted(value) - } - } - compute::Op::Remove => { - let maybe_prev_v = cache.remove(c_key, c_hash); - if let Some(prev_v) = maybe_prev_v { - Removed(prev_v) - } else { - Nop(None) - } - } - }, - Err(e) => EvalErr(e), - }; + Ok(output) => { *lock = WaiterValue::ReadyNone; - self.remove_waiter(w_key, w_hash); - init_res + output } // Panicked. 
Err(payload) => { @@ -293,7 +264,54 @@ where self.remove_waiter(w_key, w_hash); resume_unwind(payload); } - } + }; + + let op = match post_init(output) { + Ok(op) => op, + Err(e) => { + self.remove_waiter(w_key, w_hash); + return Err(e); + } + }; + + let result = match op { + Op::Nop => { + if let Some(value) = maybe_value { + Ok(CompResult::Unchanged(Entry::new( + Some(c_key), + value, + false, + false, + ))) + } else { + Ok(CompResult::StillNone(c_key)) + } + } + Op::Put(value) => { + cache.insert(Arc::clone(&c_key), c_hash, value.clone()); + if entry_existed { + crossbeam_epoch::pin().flush(); + let entry = Entry::new(Some(c_key), value, true, true); + Ok(CompResult::ReplacedWith(entry)) + } else { + let entry = Entry::new(Some(c_key), value, true, false); + Ok(CompResult::Inserted(entry)) + } + } + Op::Remove => { + let maybe_prev_v = cache.remove(&c_key, c_hash); + if let Some(prev_v) = maybe_prev_v { + let entry = Entry::new(Some(c_key), prev_v, false, false); + crossbeam_epoch::pin().flush(); + Ok(CompResult::Removed(entry)) + } else { + Ok(CompResult::StillNone(c_key)) + } + } + }; + self.remove_waiter(w_key, w_hash); + result + // The lock will be unlocked here. } @@ -319,19 +337,17 @@ where } /// The `post_init` function for the `and_upsert_with` method of cache. - pub(crate) fn post_init_for_upsert_with(value: V) -> Result, ()> { - Ok(compute::Op::Put(value)) + pub(crate) fn post_init_for_upsert_with(value: V) -> Result, ()> { + Ok(Op::Put(value)) } /// The `post_init` function for the `and_compute_with` method of cache. - pub(crate) fn post_init_for_compute_with(op: compute::Op) -> Result, ()> { + pub(crate) fn post_init_for_compute_with(op: Op) -> Result, ()> { Ok(op) } /// The `post_init` function for the `and_try_compute_with` method of cache. - pub(crate) fn post_init_for_try_compute_with( - op: Result, E>, - ) -> Result, E> + pub(crate) fn post_init_for_try_compute_with(op: Result, E>) -> Result, E> where E: Send + Sync + 'static, { From 472addce1dfdeaa6194c9389c43918d35c51e1ba Mon Sep 17 00:00:00 2001 From: Tatsuya Kawano Date: Mon, 8 Jan 2024 23:04:51 +0800 Subject: [PATCH 16/16] Compute API - Brush up the documents --- examples/try_append_value_async.rs | 4 ++-- examples/try_append_value_sync.rs | 4 ++-- src/future/entry_selector.rs | 36 +++++++++++++++--------------- src/future/value_initializer.rs | 2 +- src/sync/entry_selector.rs | 22 +++++++++--------- src/sync/value_initializer.rs | 3 +-- 6 files changed, 35 insertions(+), 36 deletions(-) diff --git a/examples/try_append_value_async.rs b/examples/try_append_value_async.rs index 0e9304d8..d0d94f59 100644 --- a/examples/try_append_value_async.rs +++ b/examples/try_append_value_async.rs @@ -1,5 +1,5 @@ -//! This example demonstrates how to append an `i32` value to a cached `Vec` -//! value. It uses the `and_upsert_with` method of `Cache`. +//! This example demonstrates how to append a `char` to a cached `Vec` value. +//! It uses the `and_upsert_with` method of `Cache`. use std::{io::Cursor, pin::Pin, sync::Arc}; diff --git a/examples/try_append_value_sync.rs b/examples/try_append_value_sync.rs index 8c07e743..81eb6be0 100644 --- a/examples/try_append_value_sync.rs +++ b/examples/try_append_value_sync.rs @@ -1,5 +1,5 @@ -//! This example demonstrates how to append an `i32` value to a cached `Vec` -//! value. It uses the `and_upsert_with` method of `Cache`. +//! This example demonstrates how to append a `char` to a cached `Vec` value. +//! It uses the `and_upsert_with` method of `Cache`. 
use std::{ io::{self, Cursor, Read}, diff --git a/src/future/entry_selector.rs b/src/future/entry_selector.rs index 0fe106b4..f1db19f0 100644 --- a/src/future/entry_selector.rs +++ b/src/future/entry_selector.rs @@ -65,12 +65,12 @@ where /// | `Nop` | no | `StillNone(Arc)` | | /// | `Nop` | yes | `Unchanged(Entry)` | The existing entry is returned. | /// - /// # Similar Methods + /// # See Also /// /// - If you want the `Future` resolve to `Result>` instead of `Op`, and - /// upsert only when resolved to `Ok(V)`, use the [`and_try_compute_with`] - /// method. - /// - If you only want to put, use the [`and_upsert_with`] method. + /// modify entry only when resolved to `Ok(V)`, use the + /// [`and_try_compute_with`] method. + /// - If you only want to update or insert, use the [`and_upsert_with`] method. /// /// [`Entry`]: ../struct.Entry.html /// [`Op`]: ../ops/compute/enum.Op.html @@ -84,7 +84,7 @@ where /// // Cargo.toml /// // /// // [dependencies] - /// // moka = { version = "0.12", features = ["future"] } + /// // moka = { version = "0.12.3", features = ["future"] } /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } /// /// use moka::{ @@ -201,11 +201,11 @@ where /// | `Nop` | no | `StillNone(Arc)` | | /// | `Nop` | yes | `Unchanged(Entry)` | The existing entry is returned. | /// - /// # Similar Methods + /// # See Also /// /// - If you want the `Future` resolve to `Op` instead of `Result>`, use /// the [`and_compute_with`] method. - /// - If you only want to put, use the [`and_upsert_with`] method. + /// - If you only want to update or insert, use the [`and_upsert_with`] method. /// /// [`Entry`]: ../struct.Entry.html /// [`Op`]: ../ops/compute/enum.Op.html @@ -251,7 +251,7 @@ where /// 3. Upsert the new value to the cache. /// 4. Return the `Entry` having the upserted value. /// - /// # Similar Methods + /// # See Also /// /// - If you want to optionally upsert, that is to upsert only when certain /// conditions meet, use the [`and_compute_with`] method. @@ -269,7 +269,7 @@ where /// // Cargo.toml /// // /// // [dependencies] - /// // moka = { version = "0.12", features = ["future"] } + /// // moka = { version = "0.12.3", features = ["future"] } /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } /// /// use moka::future::Cache; @@ -705,12 +705,12 @@ where /// | `Nop` | no | `StillNone(Arc)` | | /// | `Nop` | yes | `Unchanged(Entry)` | The existing entry is returned. | /// - /// # Similar Methods + /// # See Also /// /// - If you want the `Future` resolve to `Result>` instead of `Op`, and - /// upsert only when resolved to `Ok(V)`, use the [`and_try_compute_with`] - /// method. - /// - If you only want to put, use the [`and_upsert_with`] method. + /// modify entry only when resolved to `Ok(V)`, use the + /// [`and_try_compute_with`] method. + /// - If you only want to update or insert, use the [`and_upsert_with`] method. /// /// [`Entry`]: ../struct.Entry.html /// [`Op`]: ../ops/compute/enum.Op.html @@ -724,7 +724,7 @@ where /// // Cargo.toml /// // /// // [dependencies] - /// // moka = { version = "0.12", features = ["future"] } + /// // moka = { version = "0.12.3", features = ["future"] } /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } /// /// use moka::{ @@ -841,11 +841,11 @@ where /// | `Nop` | no | `StillNone(Arc)` | | /// | `Nop` | yes | `Unchanged(Entry)` | The existing entry is returned. 
| /// - /// # Similar Methods + /// # See Also /// /// - If you want the `Future` resolve to `Op` instead of `Result>`, use /// the [`and_compute_with`] method. - /// - If you only want to put, use the [`and_upsert_with`] method. + /// - If you only want to update or insert, use the [`and_upsert_with`] method. /// /// [`Entry`]: ../struct.Entry.html /// [`Op`]: ../ops/compute/enum.Op.html @@ -891,7 +891,7 @@ where /// 3. Upsert the new value to the cache. /// 4. Return the `Entry` having the upserted value. /// - /// # Similar Methods + /// # See Also /// /// - If you want to optionally upsert, that is to upsert only when certain /// conditions meet, use the [`and_compute_with`] method. @@ -909,7 +909,7 @@ where /// // Cargo.toml /// // /// // [dependencies] - /// // moka = { version = "0.12", features = ["future"] } + /// // moka = { version = "0.12.3", features = ["future"] } /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } /// /// use moka::future::Cache; diff --git a/src/future/value_initializer.rs b/src/future/value_initializer.rs index 831f29b0..b7392750 100644 --- a/src/future/value_initializer.rs +++ b/src/future/value_initializer.rs @@ -408,8 +408,8 @@ where Op::Remove => { let maybe_prev_v = cache.remove(&c_key, c_hash).await; if let Some(prev_v) = maybe_prev_v { - let entry = Entry::new(Some(c_key), prev_v, false, false); crossbeam_epoch::pin().flush(); + let entry = Entry::new(Some(c_key), prev_v, false, false); Ok(CompResult::Removed(entry)) } else { Ok(CompResult::StillNone(c_key)) diff --git a/src/sync/entry_selector.rs b/src/sync/entry_selector.rs index 5e66cb99..2d156176 100644 --- a/src/sync/entry_selector.rs +++ b/src/sync/entry_selector.rs @@ -63,12 +63,12 @@ where /// | `Nop` | no | `StillNone(Arc)` | | /// | `Nop` | yes | `Unchanged(Entry)` | The existing entry is returned. | /// - /// # Similar Methods + /// # See Also /// /// - If you want the `Future` resolve to `Result>` instead of `Op`, and - /// upsert only when resolved to `Ok(V)`, use the [`and_try_compute_with`] - /// method. - /// - If you only want to put, use the [`and_upsert_with`] method. + /// modify entry only when resolved to `Ok(V)`, use the + /// [`and_try_compute_with`] method. + /// - If you only want to update or insert, use the [`and_upsert_with`] method. /// /// [`Entry`]: ../struct.Entry.html /// [`Op`]: ../ops/compute/enum.Op.html @@ -183,7 +183,7 @@ where /// | `Nop` | no | `StillNone(Arc)` | | /// | `Nop` | yes | `Unchanged(Entry)` | The existing entry is returned. | /// - /// # Similar Methods + /// # See Also /// /// - If you want the `Future` resolve to `Op` instead of `Result>`, use /// the [`and_compute_with`] method. @@ -230,7 +230,7 @@ where /// 2. Upsert the new value to the cache. /// 3. Return the `Entry` having the upserted value. /// - /// # Similar Methods + /// # See Also /// /// - If you want to optionally upsert, that is to upsert only when certain /// conditions meet, use the [`and_compute_with`] method. @@ -605,12 +605,12 @@ where /// | `Nop` | no | `StillNone(Arc)` | | /// | `Nop` | yes | `Unchanged(Entry)` | The existing entry is returned. | /// - /// # Similar Methods + /// # See Also /// /// - If you want the `Future` resolve to `Result>` instead of `Op`, and - /// upsert only when resolved to `Ok(V)`, use the [`and_try_compute_with`] - /// method. - /// - If you only want to put, use the [`and_upsert_with`] method. + /// modify entry only when resolved to `Ok(V)`, use the + /// [`and_try_compute_with`] method. 
+ /// - If you only want to update or insert, use the [`and_upsert_with`] method. /// /// [`Entry`]: ../struct.Entry.html /// [`Op`]: ../ops/compute/enum.Op.html @@ -729,7 +729,7 @@ where /// /// - If you want the `Future` resolve to `Op` instead of `Result>`, use /// the [`and_compute_with`] method. - /// - If you only want to put, use the [`and_upsert_with`] method. + /// - If you only want to update or insert, use the [`and_upsert_with`] method. /// /// [`Entry`]: ../struct.Entry.html /// [`Op`]: ../ops/compute/enum.Op.html diff --git a/src/sync/value_initializer.rs b/src/sync/value_initializer.rs index 7ca35d74..e802317a 100644 --- a/src/sync/value_initializer.rs +++ b/src/sync/value_initializer.rs @@ -170,7 +170,6 @@ where } Err(e) => { let err: ErrorObject = Arc::new(e); - *lock = WaiterValue::Ready(Err(Arc::clone(&err))); InitResult::InitErr(err.downcast().unwrap()) } @@ -301,8 +300,8 @@ where Op::Remove => { let maybe_prev_v = cache.remove(&c_key, c_hash); if let Some(prev_v) = maybe_prev_v { - let entry = Entry::new(Some(c_key), prev_v, false, false); crossbeam_epoch::pin().flush(); + let entry = Entry::new(Some(c_key), prev_v, false, false); Ok(CompResult::Removed(entry)) } else { Ok(CompResult::StillNone(c_key))
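A minimal, self-contained sketch (not part of the patch above) of how a caller might combine `and_compute_with` with the `CompResult::into_entry` helper added in `src/ops.rs`, for cases where only the resulting entry matters and not which operation was performed. It assumes moka is built with the `sync` feature; the key `"key"` and the doubling logic are illustrative only.

```rust
use moka::{
    ops::compute::{CompResult, Op},
    sync::Cache,
};

fn main() {
    let cache: Cache<String, u64> = Cache::new(100);

    // Double the cached value if the entry exists; otherwise insert 1.
    let result: CompResult<String, u64> = cache
        .entry("key".to_string())
        .and_compute_with(|maybe_entry| {
            if let Some(entry) = maybe_entry {
                Op::Put(entry.into_value().saturating_mul(2))
            } else {
                Op::Put(1)
            }
        });

    // `into_entry` discards the variant (`Inserted`, `ReplacedWith`,
    // `Removed`, `Unchanged`, `StillNone`) and returns only the
    // `Option<Entry<_, _>>`, which is convenient when the caller does not
    // care which operation was performed.
    let value = result.into_entry().map(|entry| entry.into_value());
    assert_eq!(value, Some(1));
}
```

As the tables in `entry_selector.rs` state, `into_entry` returns `None` only for the `StillNone` variant, that is, when the entry did not exist and the closure returned `Op::Nop` or `Op::Remove`.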