Add ArrayQueue #189

Merged · 5 commits · Jan 21, 2019
3 changes: 1 addition & 2 deletions crossbeam-channel/src/flavors/array.rs
@@ -300,8 +300,7 @@ impl<T> Channel<T> {
atomic::fence(Ordering::SeqCst);
let tail = self.tail.load(Ordering::Relaxed);

-// If the tail lags one lap behind the head as well, that means the channel is
-// empty.
+// If the tail equals the head, that means the channel is empty.
if (tail & !self.mark_bit) == head {
// If the channel is disconnected...
if tail & self.mark_bit != 0 {
7 changes: 1 addition & 6 deletions src/lib.rs
@@ -106,17 +106,12 @@ cfg_if! {
extern crate lazy_static;
extern crate parking_lot;

-mod ms_queue;
-mod seg_queue;
mod sharded_lock;
mod treiber_stack;
mod wait_group;

/// Concurrent queues.
-pub mod queue {
-    pub use ms_queue::MsQueue;
-    pub use seg_queue::SegQueue;
-}
+pub mod queue;

/// Concurrent stacks.
pub mod stack {
367 changes: 367 additions & 0 deletions src/queue/array_queue.rs
@@ -0,0 +1,367 @@
//! Bounded multi-producer multi-consumer queue.
//!
//! The implementation is based on Dmitry Vyukov's bounded MPMC queue.
//!
//! Source:
//! - http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue
//! - https://docs.google.com/document/d/1yIAYmbvL3JxOKOjuCyon7JhW4cSv1wy5hC0ApeGMV9s/pub
//!
//! Copyright & License:
//! - Copyright (c) 2010-2011 Dmitry Vyukov
//! - Simplified BSD License and Apache License, Version 2.0
//! - http://www.1024cores.net/home/code-license

use std::cell::UnsafeCell;
use std::fmt;
use std::marker::PhantomData;
use std::mem;
use std::ptr;
use std::sync::atomic::{self, AtomicUsize, Ordering};
use std::thread;

use queue::{PopError, PushError};
use utils::CachePadded;

/// A slot in a queue.
struct Slot<T> {
/// The current stamp.
///
/// If the stamp equals the tail, this slot will be written to next. If it equals head + 1,
/// this slot will be read from next.
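///
/// For example, with `cap = 4` (and thus `one_lap = 8`), slot 2 starts with stamp 2. A push
/// at tail `{ lap: 0, index: 2 }` writes the value and bumps the stamp to 3 (`tail + 1`),
/// signalling readers; the matching pop then bumps it to 10, that is `{ lap: 1, index: 2 }`,
/// signalling writers on the next lap.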
stamp: AtomicUsize,

/// The value in this slot.
value: UnsafeCell<T>,
}

/// A bounded multi-producer multi-consumer queue.
pub struct ArrayQueue<T> {
/// The head of the queue.
///
/// This value is a "stamp" consisting of an index into the buffer and a lap, but packed into a
/// single `usize`. The lower bits represent the index, while the upper bits represent the lap.
///
/// Values are popped from the head of the queue.
head: CachePadded<AtomicUsize>,

/// The tail of the queue.
///
/// This value is a "stamp" consisting of an index into the buffer and a lap, but packed into a
/// single `usize`. The lower bits represent the index, while the upper bits represent the lap.
///
/// Values are pushed into the tail of the queue.
tail: CachePadded<AtomicUsize>,

/// The buffer holding slots.
buffer: *mut Slot<T>,

/// The queue capacity.
cap: usize,

/// A stamp with the value of `{ lap: 1, index: 0 }`.
one_lap: usize,

/// Indicates that dropping an `ArrayQueue<T>` may drop values of type `T`.
_marker: PhantomData<T>,
}

unsafe impl<T: Send> Sync for ArrayQueue<T> {}
unsafe impl<T: Send> Send for ArrayQueue<T> {}

impl<T> ArrayQueue<T> {
/// Creates a new queue of capacity `cap`.
///
/// # Panics
///
/// Panics if the capacity is zero.
pub fn new(cap: usize) -> ArrayQueue<T> {
assert!(cap > 0, "capacity must be positive");

// Head is initialized to `{ lap: 0, index: 0 }`.
// Tail is initialized to `{ lap: 0, index: 0 }`.
let head = 0;
let tail = 0;

// Allocate a buffer of `cap` slots.
let buffer = {
let mut v = Vec::<Slot<T>>::with_capacity(cap);
let ptr = v.as_mut_ptr();
mem::forget(v);
ptr
};

// Initialize stamps in the slots.
for i in 0..cap {
unsafe {
// Set the stamp to `{ lap: 0, index: i }`.
let slot = buffer.add(i);
ptr::write(&mut (*slot).stamp, AtomicUsize::new(i));
}
}

// One lap is the smallest power of two greater than `cap`.
let one_lap = (cap + 1).next_power_of_two();
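// For example, `cap = 100` gives `one_lap = 128`: the index occupies the low 7 bits of a
// stamp and the lap the remaining high bits, so `{ lap: 3, index: 5 }` is encoded as
// `3 * 128 + 5`.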

ArrayQueue {
buffer,
cap,
one_lap,
head: CachePadded::new(AtomicUsize::new(head)),
tail: CachePadded::new(AtomicUsize::new(tail)),
_marker: PhantomData,
}
}

/// Attempts to push `value` into the queue.
pub fn push(&self, value: T) -> Result<(), PushError<T>> {
let mut backoff = Backoff::new();
let mut tail = self.tail.load(Ordering::Relaxed);

loop {
// Deconstruct the tail.
let index = tail & (self.one_lap - 1);
let lap = tail & !(self.one_lap - 1);

// Inspect the corresponding slot.
let slot = unsafe { &*self.buffer.add(index) };
let stamp = slot.stamp.load(Ordering::Acquire);

// If the tail and the stamp match, we may attempt to push.
if tail == stamp {
let new_tail = if index + 1 < self.cap {
// Same lap, incremented index.
// Set to `{ lap: lap, index: index + 1 }`.
tail + 1
} else {
// One lap forward, index wraps around to zero.
// Set to `{ lap: lap.wrapping_add(1), index: 0 }`.
lap.wrapping_add(self.one_lap)
};

// Try moving the tail.
match self
.tail
.compare_exchange_weak(tail, new_tail, Ordering::SeqCst, Ordering::Relaxed)
{
Ok(_) => {
// Write the value into the slot and update the stamp.
unsafe { slot.value.get().write(value); }
slot.stamp.store(tail + 1, Ordering::Release);
return Ok(());
}
Err(t) => {
tail = t;
backoff.spin();
}
}
} else if stamp.wrapping_add(self.one_lap) == tail + 1 {
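// The stamp is one lap behind the value a push would store (`tail + 1`), which means
// the slot still holds a value from the previous lap: the queue may be full.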
atomic::fence(Ordering::SeqCst);
let head = self.head.load(Ordering::Relaxed);

// If the head lags one lap behind the tail as well...
if head.wrapping_add(self.one_lap) == tail {
// ...then the queue is full.
return Err(PushError(value));
}

backoff.spin();
tail = self.tail.load(Ordering::Relaxed);
} else {
// Snooze because we need to wait for the stamp to get updated.
backoff.snooze();
tail = self.tail.load(Ordering::Relaxed);
}
}
}

/// Attempts to pop a value from the queue.
pub fn pop(&self) -> Result<T, PopError> {
let mut backoff = Backoff::new();
let mut head = self.head.load(Ordering::Relaxed);

loop {
// Deconstruct the head.
let index = head & (self.one_lap - 1);
let lap = head & !(self.one_lap - 1);

// Inspect the corresponding slot.
let slot = unsafe { &*self.buffer.add(index) };
let stamp = slot.stamp.load(Ordering::Acquire);

// If the stamp is ahead of the head by 1, we may attempt to pop.
if head + 1 == stamp {
let new = if index + 1 < self.cap {
// Same lap, incremented index.
// Set to `{ lap: lap, index: index + 1 }`.
head + 1
} else {
// One lap forward, index wraps around to zero.
// Set to `{ lap: lap.wrapping_add(1), index: 0 }`.
lap.wrapping_add(self.one_lap)
};

// Try moving the head.
match self
.head
.compare_exchange_weak(head, new, Ordering::SeqCst, Ordering::Relaxed)
{
Ok(_) => {
// Read the value from the slot and update the stamp.
let msg = unsafe { slot.value.get().read() };
slot.stamp.store(head.wrapping_add(self.one_lap), Ordering::Release);
return Ok(msg);
}
Err(h) => {
head = h;
backoff.spin();
}
}
} else if stamp == head {
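// The stamp still carries this lap's initial value (`head`), which means the slot has
// not been written yet: the queue may be empty.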
atomic::fence(Ordering::SeqCst);
let tail = self.tail.load(Ordering::Relaxed);

// If the tail equals the head, that means the queue is empty.
if tail == head {
return Err(PopError);
}

backoff.spin();
head = self.head.load(Ordering::Relaxed);
} else {
// Snooze because we need to wait for the stamp to get updated.
backoff.snooze();
head = self.head.load(Ordering::Relaxed);
}
}
}

/// Returns the capacity of the queue.
pub fn capacity(&self) -> usize {
self.cap
}

/// Returns `true` if the queue is empty.
pub fn is_empty(&self) -> bool {
let head = self.head.load(Ordering::SeqCst);
let tail = self.tail.load(Ordering::SeqCst);

// Is the tail equal to the head?
//
// Note: If the head changes just before we load the tail, that means there was a moment
// when the queue was not empty, so it is safe to just return `false`.
tail == head
}

/// Returns `true` if the queue is full.
pub fn is_full(&self) -> bool {
let tail = self.tail.load(Ordering::SeqCst);
let head = self.head.load(Ordering::SeqCst);

// Is the head lagging one lap behind tail?
//
// Note: If the tail changes just before we load the head, that means there was a moment
// when the queue was not full, so it is safe to just return `false`.
head.wrapping_add(self.one_lap) == tail
}

/// Returns the current number of values in the queue.
pub fn len(&self) -> usize {
loop {
// Load the tail, then load the head.
let tail = self.tail.load(Ordering::SeqCst);
let head = self.head.load(Ordering::SeqCst);

// If the tail didn't change, we've got consistent values to work with.
if self.tail.load(Ordering::SeqCst) == tail {
let hix = head & (self.one_lap - 1);
let tix = tail & (self.one_lap - 1);

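// Equal indices mean the queue is either empty or full; comparing the full stamps
// (lap included) distinguishes the two cases.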
return if hix < tix {
tix - hix
} else if hix > tix {
self.cap - hix + tix
} else if tail == head {
0
} else {
self.cap
};
}
}
}
}

impl<T> Drop for ArrayQueue<T> {
fn drop(&mut self) {
// Get the index of the head.
let hix = self.head.load(Ordering::Relaxed) & (self.one_lap - 1);

// Loop over all slots that hold a message and drop them.
for i in 0..self.len() {
// Compute the index of the next slot holding a message.
let index = if hix + i < self.cap {
hix + i
} else {
hix + i - self.cap
};

unsafe {
self.buffer.add(index).drop_in_place();
}
}

// Finally, deallocate the buffer, but don't run any destructors.
unsafe {
Vec::from_raw_parts(self.buffer, 0, self.cap);
}
}
}

impl<T> fmt::Debug for ArrayQueue<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad("ArrayQueue { .. }")
}
}

/// A counter that performs exponential backoff in spin loops.
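///
/// Each step doubles the number of `spin_loop_hint` iterations (capped at 64 for `spin`);
/// `snooze` switches to `thread::yield_now` once the counter exceeds 6.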
struct Backoff(u32);

impl Backoff {
/// Creates a new `Backoff`.
#[inline]
pub fn new() -> Backoff {
Backoff(0)
}

/// Backs off in a spin loop.
///
/// This method may yield the current processor. Use it in lock-free retry loops.
#[inline]
pub fn spin(&mut self) {
for _ in 0..1 << self.0.min(6) {
atomic::spin_loop_hint();
}
self.0 = self.0.wrapping_add(1);
}

/// Backs off in a wait loop.
///
/// Returns `false` once snoozing has reached a threshold where we should consider parking the
/// thread instead.
///
/// This method may yield the current processor or the current thread. Use it when waiting on a
/// resource.
#[inline]
pub fn snooze(&mut self) -> bool {
if self.0 <= 6 {
for _ in 0..1 << self.0 {
atomic::spin_loop_hint();
}
} else {
thread::yield_now();
}

self.0 = self.0.wrapping_add(1);
self.0 <= 10
}
}
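
For reviewers, here is a minimal usage sketch of the new API (not part of the diff). It assumes `queue/mod.rs` re-exports `ArrayQueue` next to `PopError` and `PushError`; that file is not shown in this view.

extern crate crossbeam;

use crossbeam::queue::ArrayQueue;

fn main() {
    // A bounded queue with room for two values.
    let q = ArrayQueue::new(2);

    q.push('a').unwrap();
    q.push('b').unwrap();

    // The queue is now full; the rejected value is handed back inside the error.
    assert!(q.push('c').is_err());
    assert_eq!(q.len(), 2);

    // Values come out in FIFO order.
    assert_eq!(q.pop().ok(), Some('a'));
    assert_eq!(q.pop().ok(), Some('b'));
    assert!(q.pop().is_err()); // empty again
}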