Skip to content

Commit

Permalink
fix typos
Browse files Browse the repository at this point in the history
  • Loading branch information
ibraheemdev committed Jul 1, 2024
1 parent abb1600 commit 230c776
Show file tree
Hide file tree
Showing 3 changed files with 23 additions and 38 deletions.
6 changes: 3 additions & 3 deletions src/map.rs
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ unsafe impl<K, V, S: Sync> Sync for HashMap<K, V, S> {}
/// use std::collections::hash_map::RandomState;
///
/// let map: HashMap<i32, i32> = HashMap::builder()
/// // set the inital capacity
/// // set the initial capacity
/// .capacity(2048)
/// // set the hasher
/// .hasher(RandomState::new())
Expand Down Expand Up @@ -162,7 +162,7 @@ impl Default for ResizeMode {
impl<K, V> HashMap<K, V> {
/// Creates an empty `HashMap`.
///
/// The hash map is initally crated with a capacity of 0, so it will not allocate
/// The hash map is initially created with a capacity of 0, so it will not allocate
/// until it is first inserted into.
///
/// # Examples
Expand Down Expand Up @@ -313,7 +313,7 @@ impl<K, V, S> HashMap<K, V, S> {

/// Returns a pinned reference to the map.
///
/// Unlike [`pin`](HashMap::pin), the retured reference implements `Send`
/// Unlike [`pin`](HashMap::pin), the returned reference implements `Send`
/// and `Sync`, allowing it to be held across `.await` points in multi-threaded
/// schedulers. This is especially useful for iterators.
///
Expand Down
2 changes: 1 addition & 1 deletion src/raw/alloc.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ use seize::Collector;

use super::State;

// A hash table layed out in a single allocation
// A hash table laid out in a single allocation
#[repr(transparent)]
pub struct RawTable(u8);

Expand Down
53 changes: 19 additions & 34 deletions src/raw/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ pub struct State {
pub status: AtomicU32,
// A thread parker for incremental reclamation.
pub parker: Parker,
// Entries whos retirement has been deferred by later tables.
// Entries whose retirement has been deferred by later tables.
pub deferred: seize::Deferred,
// A pointer to the root collector, valid as long as the map is alive.
pub collector: *const Collector,
Expand Down Expand Up @@ -502,35 +502,20 @@ where
probe.next();
}

match self.root.resize {
ResizeMode::Incremental(_) => {
// went over the probe limit or found a copied entry, trigger a resize
let next_table = self.get_or_alloc_next(None);

// help out with the resize
if help_copy {
self.help_copy(guard, false);
}
// went over the probe limit or found a copied entry, trigger a resize
let next_table = self.get_or_alloc_next(None);

// insert into the next table, racing with the copy of this entry
// TOOD: do we have to mark the entry as copied?
//
// make sure not to help copy again to keep resizing costs constant
self.as_ref(next_table)
.insert_entry(new_entry, replace, false, guard)
}
ResizeMode::Blocking => {
// went over the probe limit or found a copied entry, trigger a resize
self.get_or_alloc_next(None);

// complete the copy
let next_table = self.help_copy(guard, false);

// insert into the next table
self.as_ref(next_table)
.insert_entry(new_entry, replace, help_copy, guard)
}
// help out with the resize, or complete the copy if in blocking mode
if self.root.is_blocking() || help_copy {
self.help_copy(guard, false);
}

// insert into the next table, racing with the copy of this entry
//
// make sure not to help copy again to keep resizing costs constant in
// incremental mode
self.as_ref(next_table)
.insert_entry(new_entry, replace, false, guard)
}

// Replaces the value of an existing entry, returning the previous value if successful.
Expand All @@ -555,7 +540,7 @@ where
Ordering::Release,
Ordering::Relaxed,
) {
// succesful update
// successful update
Ok(_) => unsafe {
// safety: the old value is now unreachable in this table
self.defer_retire(entry, guard);
Expand Down Expand Up @@ -714,7 +699,7 @@ where
Ordering::Release,
Ordering::Relaxed,
) {
// succesful update
// successful update
Ok(_) => unsafe {
// safety: the old value is now unreachable in this table
self.defer_retire(entry, guard);
Expand Down Expand Up @@ -771,7 +756,7 @@ where
}

// wait for the copy to complete
// TODO: race and try to insert our entry in the table based on the entry being copied
// TODO: update the copied entry and race to insert into the table
self.wait_copied(i);

// retry in the new table
Expand Down Expand Up @@ -860,7 +845,7 @@ where
Ordering::Relaxed,
Ordering::Relaxed,
) {
// succesfully deleted
// successfully deleted
Ok(_) => unsafe {
// mark the key as a tombstone to avoid unnecessary reads
// note this might end up being overwritten by a slow h2 store,
Expand Down Expand Up @@ -1410,7 +1395,7 @@ where
}
}

// Allocate the inital table.
// Allocate the initial table.
fn init(&mut self, capacity: Option<usize>) -> bool {
const CAPACITY: usize = 32;

Expand Down Expand Up @@ -1756,7 +1741,7 @@ macro_rules! probe_limit {

use probe_limit;

// Returns an esitmate of he number of entries needed to hold `capacity` elements.
// Returns an estimate of the number of entries needed to hold `capacity` elements.
fn entries_for(capacity: usize) -> usize {
// we should rarely resize before 75%
let capacity = capacity.checked_mul(8).expect("capacity overflow") / 6;
Expand Down

0 comments on commit 230c776

Please sign in to comment.