uses atomics for read-only accounts cache entry index (#32518)
Using atomics for entry indices allows the load function to use self.cache.get
instead of get_mut, which reduces lock contention on the respective
DashMap shard.
behzadnouri authored Aug 7, 2023
1 parent c2dec25 commit b9a2030
Showing 1 changed file with 29 additions and 9 deletions.
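For context on the change below: DashMap guards each shard with an RwLock, so cache.get only read-locks the shard while get_mut write-locks it. A minimal sketch of the idea, assuming the dashmap crate and purely illustrative names (DemoEntry, touch); this is not the Solana code:

    use dashmap::DashMap;
    use std::sync::atomic::{AtomicU32, Ordering};

    struct DemoEntry {
        index: AtomicU32, // position in the eviction queue
    }

    // With a plain u32 field this update would need cache.get_mut (a shard
    // write lock); with an AtomicU32 it can go through the shared reference
    // returned by cache.get, so concurrent readers do not serialize.
    fn touch(cache: &DashMap<u64, DemoEntry>, key: u64, new_index: u32) -> Option<u32> {
        let entry = cache.get(&key)?; // shared access: shard read lock only
        Some(entry.index.swap(new_index, Ordering::Relaxed))
    }

    fn main() {
        let cache = DashMap::new();
        cache.insert(42u64, DemoEntry { index: AtomicU32::new(0) });
        assert_eq!(touch(&cache, 42, 7), Some(0));
    }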
38 changes: 29 additions & 9 deletions runtime/src/read_only_accounts_cache.rs
@@ -10,7 +10,7 @@ use {
         pubkey::Pubkey,
     },
     std::sync::{
-        atomic::{AtomicU64, AtomicUsize, Ordering},
+        atomic::{AtomicU32, AtomicU64, AtomicUsize, Ordering},
         Mutex,
     },
 };
@@ -23,7 +23,7 @@ type ReadOnlyCacheKey = (Pubkey, Slot);
 #[derive(Debug)]
 struct ReadOnlyAccountCacheEntry {
     account: AccountSharedData,
-    index: Index, // Index of the entry in the eviction queue.
+    index: AtomicU32, // Index of the entry in the eviction queue.
 }
 
 #[derive(Debug)]
@@ -77,7 +77,7 @@ impl ReadOnlyAccountsCache {
     pub(crate) fn load(&self, pubkey: Pubkey, slot: Slot) -> Option<AccountSharedData> {
         let (account, load_us) = measure_us!({
             let key = (pubkey, slot);
-            let Some(mut entry) = self.cache.get_mut(&key) else {
+            let Some(entry) = self.cache.get(&key) else {
                 self.misses.fetch_add(1, Ordering::Relaxed);
                 return None;
             };
@@ -86,8 +86,8 @@
             // so that another thread cannot write to the same key.
             {
                 let mut queue = self.queue.lock().unwrap();
-                queue.remove(entry.index);
-                entry.index = queue.insert_last(key);
+                queue.remove(entry.index());
+                entry.set_index(queue.insert_last(key));
             }
             let account = entry.account.clone();
             drop(entry);
@@ -113,7 +113,7 @@
                 // Insert the entry at the end of the queue.
                 let mut queue = self.queue.lock().unwrap();
                 let index = queue.insert_last(key);
-                entry.insert(ReadOnlyAccountCacheEntry { account, index });
+                entry.insert(ReadOnlyAccountCacheEntry::new(account, index));
             }
             Entry::Occupied(mut entry) => {
                 let entry = entry.get_mut();
@@ -122,8 +122,8 @@
                 entry.account = account;
                 // Move the entry to the end of the queue.
                 let mut queue = self.queue.lock().unwrap();
-                queue.remove(entry.index);
-                entry.index = queue.insert_last(key);
+                queue.remove(entry.index());
+                entry.set_index(queue.insert_last(key));
             }
         };
         // Evict entries from the front of the queue.
@@ -143,7 +143,7 @@
         // self.queue should be modified only after removing the entry from the
         // cache, so that this is still safe if another thread writes to the
         // same key.
-        self.queue.lock().unwrap().remove(entry.index);
+        self.queue.lock().unwrap().remove(entry.index());
         let account_size = self.account_size(&entry.account);
         self.data_size.fetch_sub(account_size, Ordering::Relaxed);
         Some(entry.account)
@@ -167,6 +167,26 @@
     }
 }
 
+impl ReadOnlyAccountCacheEntry {
+    fn new(account: AccountSharedData, index: Index) -> Self {
+        let index = unsafe { std::mem::transmute::<Index, u32>(index) };
+        let index = AtomicU32::new(index);
+        Self { account, index }
+    }
+
+    #[inline]
+    fn index(&self) -> Index {
+        let index = self.index.load(Ordering::Relaxed);
+        unsafe { std::mem::transmute::<u32, Index>(index) }
+    }
+
+    #[inline]
+    fn set_index(&self, index: Index) {
+        let index = unsafe { std::mem::transmute::<Index, u32>(index) };
+        self.index.store(index, Ordering::Relaxed);
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use {
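A note on the new ReadOnlyAccountCacheEntry accessors above: holding the eviction-queue position in an AtomicU32 is what lets load update it through the shared reference from cache.get, and the transmute calls rely on index_list::Index being bit-compatible with u32 (transmute only compiles when the sizes match, and the bits must round-trip losslessly). A minimal sketch of that round trip on a hypothetical #[repr(transparent)] newtype, purely for illustration and not the real index_list type:

    use std::sync::atomic::{AtomicU32, Ordering};

    // Hypothetical stand-in for index_list::Index; not the real type.
    #[derive(Clone, Copy, Debug, PartialEq)]
    #[repr(transparent)]
    struct DemoIndex(u32);

    struct DemoCacheEntry {
        index: AtomicU32,
    }

    impl DemoCacheEntry {
        fn index(&self) -> DemoIndex {
            // Reinterpret the stored u32 bits as the index newtype.
            unsafe { std::mem::transmute::<u32, DemoIndex>(self.index.load(Ordering::Relaxed)) }
        }

        fn set_index(&self, index: DemoIndex) {
            // Store the newtype's bits as a raw u32.
            let bits = unsafe { std::mem::transmute::<DemoIndex, u32>(index) };
            self.index.store(bits, Ordering::Relaxed);
        }
    }

    fn main() {
        let entry = DemoCacheEntry { index: AtomicU32::new(0) };
        entry.set_index(DemoIndex(7));
        assert_eq!(entry.index(), DemoIndex(7));
    }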