Skip to content

Commit

Permalink
refactor and document internals
Browse files Browse the repository at this point in the history
  • Loading branch information
ibraheemdev committed Jul 3, 2024
1 parent 2c9fd5a commit b1c8550
Show file tree
Hide file tree
Showing 11 changed files with 1,288 additions and 1,069 deletions.
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -157,6 +157,6 @@ The `Guard` trait supports both local and owned guards. Note the `'guard` lifeti

`papaya` also aims to provide predictable, consistent latency across all operations. Most operations are lock-free, and those that aren't only block under rare and constrained conditions. `papaya` also features [incremental resizing], meaning operations aren't required to block when resizing the hash-table. Predictable latency is an important part of performance that doesn't often show up in benchmarks, but has significant implications for real-world usage.

[benchmarks]: TOOD
[benchmarks]: TODO
[`seize`]: https://docs.rs/seize/latest
[incremental resizing]: https://docs.rs/papaya/latest/papaya/enum.ResizeMode.html
2 changes: 1 addition & 1 deletion src/lib.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
#![allow(unstable_name_collisions)]
#![allow(unstable_name_collisions, clippy::multiple_bound_locations)]
#![doc = include_str!("../README.md")]

mod map;
Expand Down
26 changes: 13 additions & 13 deletions src/map.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
use crate::raw::{self, EntryStatus};
use crate::raw::{self, InsertResult};
use seize::{Collector, Guard, LocalGuard, OwnedGuard};

use std::borrow::Borrow;
Expand Down Expand Up @@ -420,10 +420,10 @@ where
#[inline]
pub fn get<'g, Q>(&self, key: &Q, guard: &'g impl Guard) -> Option<&'g V>
where
K: Borrow<Q> + 'g,
K: Borrow<Q> + 'g, // TODO: this bound is necessary because `raw::HashMap::get` returns the full entry.
Q: Hash + Eq + ?Sized,
{
self.raw.root(guard).get_entry(key, guard).map(|(_, v)| v)
self.raw.root(guard).get(key, guard).map(|(_, v)| v)
}

/// Returns the key-value pair corresponding to the supplied key.
Expand Down Expand Up @@ -452,7 +452,7 @@ where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
self.raw.root(guard).get_entry(key, guard)
self.raw.root(guard).get(key, guard)
}

/// Inserts a key-value pair into the map.
Expand Down Expand Up @@ -485,9 +485,9 @@ where
#[inline]
pub fn insert<'g>(&self, key: K, value: V, guard: &'g impl Guard) -> Option<&'g V> {
match self.raw.root(guard).insert(key, value, true, guard) {
EntryStatus::Empty(_) => None,
EntryStatus::Replaced(value) => Some(value),
EntryStatus::Error { .. } => unreachable!(),
InsertResult::Inserted(_) => None,
InsertResult::Replaced(value) => Some(value),
InsertResult::Error { .. } => unreachable!(),
}
}

Expand Down Expand Up @@ -518,15 +518,15 @@ where
guard: &'g impl Guard,
) -> Result<&'g V, OccupiedError<'g, V>> {
match self.raw.root(guard).insert(key, value, false, guard) {
EntryStatus::Empty(value) => Ok(value),
EntryStatus::Error {
InsertResult::Inserted(value) => Ok(value),
InsertResult::Error {
current,
not_inserted,
} => Err(OccupiedError {
current,
not_inserted,
}),
EntryStatus::Replaced(_) => unreachable!(),
InsertResult::Replaced(_) => unreachable!(),
}
}

Expand All @@ -549,10 +549,10 @@ where
///
/// let mut map = HashMap::new();
/// map.pin().insert("a", 1);
/// assert_eq!(m.get(&"a"), Some(&1));
/// assert_eq!(map.pin().get(&"a"), Some(&1));
///
/// map.pin().update("a", |v| v + 1);
/// assert_eq!(m.get(&"a"), Some(&2));
/// assert_eq!(map.pin().get(&"a"), Some(&2));
/// ```
pub fn update<'g, F>(&self, key: K, update: F, guard: &'g impl Guard) -> Option<&'g V>
where
Expand Down Expand Up @@ -581,7 +581,7 @@ where
#[inline]
pub fn remove<'g, Q>(&self, key: &Q, guard: &'g impl Guard) -> Option<&'g V>
where
K: Borrow<Q> + 'g,
K: Borrow<Q> + 'g, // TODO: this bound is necessary because `raw::HashMap::remove` returns the full entry.
Q: Hash + Eq + ?Sized,
{
match self.raw.root(guard).remove(key, guard) {
Expand Down
41 changes: 22 additions & 19 deletions src/raw/alloc.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,36 +7,32 @@ use seize::Collector;

use super::State;

// A hash table laid out in a single allocation
// A hash-table laid out in a single allocation.
#[repr(transparent)]
pub struct RawTable(u8);

// Safety: seize::Link is the first field (see TableLayout)
// Safety: `seize::Link` is the first field (see `TableLayout`).
unsafe impl seize::AsLink for RawTable {}

#[repr(align(16))]
#[allow(dead_code)]
struct AtomicU128(u128);

// The table allocation's layout
// The layout of the table allocation.
#[repr(C)]
struct TableLayout {
link: seize::Link,
len: usize,
capacity: usize,
state: State,
meta: [AtomicU128; 0],
meta: [AtomicU8; 0],
entries: [AtomicPtr<()>; 0],
}

// Manages a table allocation.
#[repr(C)]
pub struct Table<T> {
// the exposed length of the table
// The exposed length of the table.
pub len: usize,
// the raw table pointer
// The raw table pointer.
pub raw: *mut RawTable,
// the true (padded) table capacity
// The true (padded) table capacity.
capacity: usize,
_t: PhantomData<T>,
}
Expand All @@ -50,24 +46,24 @@ impl<T> Clone for Table<T> {
}

impl<T> Table<T> {
pub fn new(len: usize, collector: &Collector) -> Table<T> {
// Allocate a table with the provided length.
pub fn alloc(len: usize, collector: &Collector) -> Table<T> {
assert!(len.is_power_of_two());
assert!(mem::align_of::<seize::Link>() % mem::align_of::<*mut T>() == 0);

// pad the meta table to fulfill the alignment requirement of an entry
// Pad the meta table to fulfill the alignment requirement of an entry.
let capacity = (len + mem::align_of::<*mut T>() - 1) & !(mem::align_of::<*mut T>() - 1);

unsafe {
let layout = Self::layout(capacity);

// allocate the table, with the entry pointers zeroed
// Allocate the table, zeroing the entries.
let ptr = alloc::alloc_zeroed(layout);

if ptr.is_null() {
alloc::handle_alloc_error(layout);
}

// write the table layout state
// Write the table state.
ptr.cast::<TableLayout>().write(TableLayout {
link: collector.link(),
len,
Expand All @@ -80,7 +76,7 @@ impl<T> Table<T> {
entries: [],
});

// initialize the meta table
// Initialize the meta table.
ptr.add(mem::size_of::<TableLayout>())
.cast::<u8>()
.write_bytes(super::meta::EMPTY, capacity);
Expand All @@ -94,6 +90,7 @@ impl<T> Table<T> {
}
}

// Creates a `Table` from a raw pointer.
#[inline(always)]
pub unsafe fn from_raw(raw: *mut RawTable) -> Table<T> {
if raw.is_null() {
Expand All @@ -115,6 +112,7 @@ impl<T> Table<T> {
}
}

// Returns the metadata entry at the given index.
#[inline(always)]
pub unsafe fn meta(&self, i: usize) -> &AtomicU8 {
debug_assert!(i < self.capacity);
Expand All @@ -125,6 +123,7 @@ impl<T> Table<T> {
.cast::<AtomicU8>()
}

// Returns the entry at the given index.
#[inline(always)]
pub unsafe fn entry(&self, i: usize) -> &AtomicPtr<T> {
let offset = mem::size_of::<TableLayout>()
Expand All @@ -135,20 +134,24 @@ impl<T> Table<T> {
&*self.raw.add(offset).cast::<AtomicPtr<T>>()
}

// Returns a reference to the table state.
pub fn state(&self) -> &State {
    // Safety: `self.raw` points to a live allocation laid out as `TableLayout`
    // (`seize::Link` header first — see `TableLayout`), so casting and reading
    // the `state` field is in-bounds. TODO(review): confirm `raw` is never null
    // here — `from_raw` appears to handle a null pointer separately.
    unsafe { &(*self.raw.cast::<TableLayout>()).state }
}

// Returns a mutable reference to the table state.
pub fn state_mut(&mut self) -> &mut State {
    // Safety: `self.raw` points to a live allocation laid out as `TableLayout`,
    // so the `state` field is valid for writes. Note `Table<T>` is `Clone`, so
    // `&mut self` alone does not guarantee exclusive access to the allocation;
    // callers must uphold aliasing rules across clones — TODO confirm invariant.
    unsafe { &mut (*self.raw.cast::<TableLayout>()).state }
}

// Deallocate the table.
//
// Safety: `table.raw` must have been produced by an allocation with the same
// `capacity` (so the `Layout` computed here matches the one used to allocate),
// must not have been deallocated already, and no other `Table` clone may be
// used to access the allocation after this call.
pub unsafe fn dealloc(table: Table<T>) {
    let layout = Self::layout(table.capacity);
    // Run the header's destructor (drops the embedded `State` field) before
    // releasing the backing memory.
    ptr::drop_in_place(table.raw.cast::<TableLayout>());
    // Free with the exact layout of the original allocation.
    unsafe { alloc::dealloc(table.raw.cast::<u8>(), layout) }
}

// The table layout used for allocation.
fn layout(capacity: usize) -> Layout {
let size = mem::size_of::<TableLayout>()
+ (mem::size_of::<u8>() * capacity) // meta
Expand All @@ -161,10 +164,10 @@ impl<T> Table<T> {
fn layout() {
unsafe {
let collector = seize::Collector::new();
let table: Table<u8> = Table::new(4, &collector);
let table: Table<u8> = Table::alloc(4, &collector);
let table: Table<u8> = Table::from_raw(table.raw);
assert_eq!(table.len, 4);
// padded for pointer alignment
// The capacity is padded for pointer alignment.
assert_eq!(table.capacity, 8);
Table::dealloc(table);
}
Expand Down
Loading

0 comments on commit b1c8550

Please sign in to comment.