diff --git a/README.md b/README.md
index cc246e5..7ea75b1 100644
--- a/README.md
+++ b/README.md
@@ -5,7 +5,7 @@
The Wasm-Enabled, Elfin Allocator
-
+
@@ -88,7 +88,7 @@ static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;
nightly Rust.
- **nightly**: Enable usage of nightly-only Rust features, such as implementing
- the `Alloc` trait (not to be confused with the stable `GlobalAlloc` trait!)
+ the `Allocator` trait (not to be confused with the stable `GlobalAlloc` trait!)
### Implementation Notes and Constraints
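For context: this feature-flag wording tracks a rename on nightly Rust, where the unstable allocator trait is now called `Allocator` and its error type `AllocError`. Stable users are unaffected. A minimal sketch of the unchanged stable usage, mirroring the snippet earlier in this README:

```rust
// Stable usage is untouched by the rename: `WeeAlloc` is still installed
// through the stable `GlobalAlloc` machinery.
#[global_allocator]
static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;
```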
diff --git a/README.tpl b/README.tpl
index 159818e..c5dc0d2 100644
--- a/README.tpl
+++ b/README.tpl
@@ -5,7 +5,7 @@
The Wasm-Enabled, Elfin Allocator
-
+
diff --git a/example/src/lib.rs b/example/src/lib.rs
index c696564..0ab662a 100755
--- a/example/src/lib.rs
+++ b/example/src/lib.rs
@@ -21,9 +21,7 @@ static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;
#[panic_handler]
#[no_mangle]
pub fn panic(_info: &::core::panic::PanicInfo) -> ! {
- unsafe {
- ::core::intrinsics::abort();
- }
+ ::core::intrinsics::abort();
}
// Need to provide an allocation error handler which just aborts
@@ -31,9 +29,7 @@ pub fn panic(_info: &::core::panic::PanicInfo) -> ! {
#[alloc_error_handler]
#[no_mangle]
pub extern "C" fn oom(_: ::core::alloc::Layout) -> ! {
- unsafe {
- ::core::intrinsics::abort();
- }
+ ::core::intrinsics::abort();
}
// Needed for non-wasm targets.
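The two hunks above drop the `unsafe` blocks around `core::intrinsics::abort()`: on the nightlies this patch targets, `abort` is callable from safe code, so the wrappers were redundant and would trip the `unused_unsafe` lint. A condensed sketch of the resulting handler, assuming a `no_std` crate that already enables `core_intrinsics` as this example crate does:

```rust
#![feature(core_intrinsics)]
#![no_std]

// `core::intrinsics::abort` is a safe function here, so the handler body
// needs no `unsafe` block.
#[panic_handler]
pub fn panic(_info: &core::panic::PanicInfo) -> ! {
    core::intrinsics::abort();
}
```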
diff --git a/test/src/lib.rs b/test/src/lib.rs
index 7f391f4..425b177 100755
--- a/test/src/lib.rs
+++ b/test/src/lib.rs
@@ -1,4 +1,5 @@
#![feature(allocator_api)]
+#![feature(slice_ptr_get)]
extern crate histo;
#[macro_use]
@@ -9,7 +10,7 @@ extern crate rand;
extern crate wee_alloc;
use quickcheck::{Arbitrary, Gen};
-use std::alloc::{Alloc, Layout};
+use std::alloc::{Allocator, Layout};
use std::f64;
use std::fs;
use std::io::Read;
@@ -20,7 +21,7 @@ use std::str::FromStr;
#[derive(Debug, Clone, Copy)]
pub enum Operation {
// Allocate this many bytes.
- Alloc(usize),
+ Allocator(usize),
// Free the n^th allocation we've made, or no-op if it has already
// been freed.
@@ -41,7 +42,7 @@ impl Operation {
// Zero sized allocation 1/1000 times.
if g.gen_weighted_bool(1000) {
- return Alloc(0);
+ return Allocator(0);
}
// XXX: Keep this synced with `wee_alloc`.
@@ -53,14 +54,14 @@ impl Operation {
if g.gen_weighted_bool(20) {
let n =
g.gen_range(1, 10) * max_small_alloc_size + g.gen_range(0, max_small_alloc_size);
- return Alloc(n);
+ return Allocator(n);
}
// Small allocation.
if g.gen() {
- Alloc(g.gen_range(12, 17))
+ Allocator(g.gen_range(12, 17))
} else {
- Alloc(max_small_alloc_size)
+ Allocator(max_small_alloc_size)
}
}
@@ -80,7 +81,7 @@ impl FromStr for Operation {
let start = "Alloc(".len();
let end = s.len() - "),".len();
let n: usize = s[start..end].parse().map_err(|_| ())?;
- return Ok(Alloc(n));
+ return Ok(Allocator(n));
}
if s.starts_with("Free(") && s.ends_with("),") {
@@ -183,7 +184,13 @@ impl Arbitrary for Operations {
.0
.iter()
.enumerate()
- .filter_map(|(i, op)| if let Alloc(_) = *op { Some(i) } else { None })
+ .filter_map(|(i, op)| {
+ if let Allocator(_) = *op {
+ Some(i)
+ } else {
+ None
+ }
+ })
.collect();
let ops = self.0.clone();
@@ -217,11 +224,11 @@ impl Arbitrary for Operations {
.enumerate()
.filter_map(|(j, op)| {
if i == j {
- if let Alloc(size) = *op {
+ if let Allocator(size) = *op {
if size == 0 {
None
} else {
- Some(Alloc(size / 2))
+ Some(Allocator(size / 2))
}
} else {
Some(*op)
@@ -266,13 +273,13 @@ impl Operations {
handle3.join().expect("Thread 3 Failed");
}
- pub fn run_with_allocator<A: Alloc>(&self, mut a: A) {
+ pub fn run_with_allocator<A: Allocator>(&self, a: A) {
let mut allocs = vec![];
for op in self.0.iter().cloned() {
match op {
- Alloc(n) => {
+ Allocator(n) => {
let layout = Layout::from_size_align(n, mem::size_of::<usize>()).unwrap();
- allocs.push(match unsafe { a.alloc(layout.clone()) } {
+ allocs.push(match a.allocate(layout.clone()) {
Ok(ptr) => Some((ptr, layout)),
Err(_) => None,
});
@@ -281,7 +288,7 @@ impl Operations {
if let Some(entry) = allocs.get_mut(idx) {
if let Some((ptr, layout)) = entry.take() {
unsafe {
- a.dealloc(ptr, layout);
+ a.deallocate(ptr.as_non_null_ptr(), layout);
}
}
}
@@ -295,7 +302,7 @@ impl Operations {
pub fn size_histogram(&self) -> histo::Histogram {
let mut histogram = histo::Histogram::with_buckets(Self::NUM_BUCKETS);
for op in &self.0 {
- if let Alloc(n) = *op {
+ if let Allocator(n) = *op {
let n = n as f64;
let n = n.log2().round();
histogram.add(n as u64);
@@ -374,9 +381,9 @@ quickcheck! {
let size = size % 65536;
let align = ALIGNS[align % ALIGNS.len()];
- let mut w = &wee_alloc::WeeAlloc::INIT;
+ let w = &wee_alloc::WeeAlloc::INIT;
let layout = Layout::from_size_align(size, align).unwrap();
- let _ = unsafe { w.alloc(layout) };
+ let _ = w.allocate(layout);
}
}
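This hunk shows the central mechanical change in the tests: the old `Alloc` pattern (a `mut` binding plus an `unsafe` call to `alloc`) becomes a safe `allocate` call on a shared reference, returning a fat `NonNull<[u8]>` that covers the whole block rather than a thin `NonNull<u8>`. A minimal sketch of the new calling convention, generic over any `Allocator` (`roundtrip` is a hypothetical name, not part of the patch):

```rust
#![feature(allocator_api, slice_ptr_get)]
use std::alloc::{AllocError, Allocator, Layout};
use std::ptr::NonNull;

// `allocate` is safe and takes `&self`; `deallocate` stays unsafe because
// the caller must supply a live pointer and the matching layout.
fn roundtrip<A: Allocator>(a: &A) -> Result<(), AllocError> {
    let layout = Layout::new::<u64>();
    let ptr: NonNull<[u8]> = a.allocate(layout)?;
    unsafe { a.deallocate(ptr.as_non_null_ptr(), layout) };
    Ok(())
}
```

`as_non_null_ptr` recovers the thin pointer from the returned slice pointer, which is why the patch adds `#![feature(slice_ptr_get)]` at the top of the test crate.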
@@ -409,29 +416,36 @@ test_trace!(test_trace_source_map, "../traces/source-map.trace");
#[test]
fn regression_test_0() {
- Operations(vec![Alloc(1)]).run_single_threaded();
+ Operations(vec![Allocator(1)]).run_single_threaded();
}
#[test]
fn regression_test_1() {
- Operations(vec![Alloc(1414), Free(0), Alloc(1414), Free(1)]).run_single_threaded();
+ Operations(vec![Allocator(1414), Free(0), Allocator(1414), Free(1)]).run_single_threaded();
}
#[test]
fn regression_test_2() {
- Operations(vec![Alloc(168), Free(0), Alloc(0), Alloc(168), Free(2)]).run_single_threaded();
+ Operations(vec![
+ Allocator(168),
+ Free(0),
+ Allocator(0),
+ Allocator(168),
+ Free(2),
+ ])
+ .run_single_threaded();
}
#[test]
fn regression_test_3() {
- Operations(vec![Alloc(13672), Free(0), Alloc(1)]).run_single_threaded();
+ Operations(vec![Allocator(13672), Free(0), Allocator(1)]).run_single_threaded();
}
#[test]
fn allocate_size_zero() {
use std::iter;
Operations(
- iter::repeat(Alloc(0))
+ iter::repeat(Allocator(0))
.take(1000)
.chain((0..1000).map(|i| Free(i)))
.collect(),
@@ -444,10 +458,10 @@ fn allocate_many_small() {
use std::iter;
Operations(
- iter::repeat(Alloc(16 * mem::size_of::<usize>()))
+ iter::repeat(Allocator(16 * mem::size_of::<usize>()))
.take(100)
.chain((0..100).map(|i| Free(i)))
- .chain(iter::repeat(Alloc(256 * mem::size_of::<usize>())).take(100))
+ .chain(iter::repeat(Allocator(256 * mem::size_of::<usize>())).take(100))
.chain((0..100).map(|i| Free(i + 100)))
.collect(),
)
@@ -459,10 +473,10 @@ fn allocate_many_large() {
use std::iter;
Operations(
- iter::repeat(Alloc(257 * mem::size_of::<usize>()))
+ iter::repeat(Allocator(257 * mem::size_of::<usize>()))
.take(100)
.chain((0..100).map(|i| Free(i)))
- .chain(iter::repeat(Alloc(1024 * mem::size_of::<usize>())).take(100))
+ .chain(iter::repeat(Allocator(1024 * mem::size_of::<usize>())).take(100))
.chain((0..100).map(|i| Free(i + 100)))
.collect(),
)
@@ -477,40 +491,38 @@ fn allocate_many_large() {
#[test]
fn smoke() {
- let mut a = &wee_alloc::WeeAlloc::INIT;
+ let a = &wee_alloc::WeeAlloc::INIT;
unsafe {
let layout = Layout::new::<u8>();
let ptr = a
- .alloc(layout.clone())
+ .allocate(layout.clone())
.expect("Should be able to alloc a fresh Layout clone");
{
- let ptr = ptr.as_ptr() as *mut u8;
+ let ptr = ptr.as_mut_ptr();
*ptr = 9;
assert_eq!(*ptr, 9);
}
- a.dealloc(ptr, layout.clone());
+ a.deallocate(ptr.as_non_null_ptr(), layout.clone());
let ptr = a
- .alloc(layout.clone())
+ .allocate(layout.clone())
.expect("Should be able to alloc from a second clone");
{
- let ptr = ptr.as_ptr() as *mut u8;
+ let ptr = ptr.as_mut_ptr();
*ptr = 10;
assert_eq!(*ptr, 10);
}
- a.dealloc(ptr, layout.clone());
+ a.deallocate(ptr.as_non_null_ptr(), layout.clone());
}
}
#[test]
fn cannot_alloc_max_usize() {
- let mut a = &wee_alloc::WeeAlloc::INIT;
- unsafe {
- let layout = Layout::from_size_align(std::usize::MAX, 1)
- .expect("should be able to create a `Layout` with size = std::usize::MAX");
- let result = a.alloc(layout);
- assert!(result.is_err());
- }
+ let a = &wee_alloc::WeeAlloc::INIT;
+ let layout = Layout::from_size_align(std::usize::MAX, 1)
+ .expect("should be able to create a `Layout` with size = std::usize::MAX");
+ let result = a.allocate(layout);
+ assert!(result.is_err());
}
// This takes too long with our extra assertion checks enabled,
@@ -521,17 +533,17 @@ fn stress() {
use rand::Rng;
use std::cmp;
- let mut a = &wee_alloc::WeeAlloc::INIT;
+ let a = &wee_alloc::WeeAlloc::INIT;
let mut rng = rand::weak_rng();
- let mut ptrs = Vec::new();
+ let mut ptrs: Vec<(_, Layout)> = Vec::new();
unsafe {
for _ in 0..100_000 {
let free =
ptrs.len() > 0 && ((ptrs.len() < 1_000 && rng.gen_weighted_bool(3)) || rng.gen());
if free {
let idx = rng.gen_range(0, ptrs.len());
- let (ptr, layout): (_, Layout) = ptrs.swap_remove(idx);
- a.dealloc(ptr, layout);
+ let (ptr, layout): (std::ptr::NonNull<[u8]>, Layout) = ptrs.swap_remove(idx);
+ a.deallocate(ptr.as_non_null_ptr(), layout);
continue;
}
@@ -549,11 +561,15 @@ fn stress() {
};
let mut tmp = Vec::new();
for i in 0..cmp::min(old.size(), new.size()) {
- tmp.push(*(ptr.as_ptr() as *mut u8).offset(i as isize));
+ tmp.push(*(ptr.as_mut_ptr()).offset(i as isize));
}
- let ptr = a.realloc(ptr, old, new.size()).unwrap();
+ let ptr = if new.size() >= old.size() {
+ a.grow(ptr.as_non_null_ptr(), old, new).unwrap()
+ } else {
+ a.shrink(ptr.as_non_null_ptr(), old, new).unwrap()
+ };
for (i, byte) in tmp.iter().enumerate() {
- assert_eq!(*byte, *(ptr.as_ptr() as *mut u8).offset(i as isize));
+ assert_eq!(*byte, *(ptr.as_mut_ptr()).offset(i as isize));
}
ptrs.push((ptr, new));
}
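`Allocator` has no single `realloc`; resizing is split into `grow` (the new layout is at least as large) and `shrink` (at most as large), hence the new branch in the stress test. A sketch of the replacement shim under the same preconditions the old `realloc` had, namely a live block with layout `old` (`realloc_shim` is a hypothetical name):

```rust
#![feature(allocator_api, slice_ptr_get)]
use std::alloc::{AllocError, Allocator, Layout};
use std::ptr::NonNull;

// Dispatch on the size relation; both methods require that `ptr` denotes
// a live allocation with layout `old` from this same allocator.
unsafe fn realloc_shim<A: Allocator>(
    a: &A,
    ptr: NonNull<[u8]>,
    old: Layout,
    new: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
    if new.size() >= old.size() {
        a.grow(ptr.as_non_null_ptr(), old, new)
    } else {
        a.shrink(ptr.as_non_null_ptr(), old, new)
    }
}
```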
@@ -569,15 +585,15 @@ fn stress() {
let layout = Layout::from_size_align(size, align).unwrap();
let ptr = if zero {
- a.alloc_zeroed(layout.clone()).unwrap()
+ a.allocate_zeroed(layout.clone()).unwrap()
} else {
- a.alloc(layout.clone()).unwrap()
+ a.allocate(layout.clone()).unwrap()
};
for i in 0..layout.size() {
if zero {
- assert_eq!(*(ptr.as_ptr() as *mut u8).offset(i as isize), 0);
+ assert_eq!(*(ptr.as_mut_ptr()).offset(i as isize), 0);
}
- *(ptr.as_ptr() as *mut u8).offset(i as isize) = 0xce;
+ *(ptr.as_mut_ptr()).offset(i as isize) = 0xce;
}
ptrs.push((ptr, layout));
}
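The last hunk swaps `alloc_zeroed` for `allocate_zeroed`, which has the same shape as `allocate` plus a zero-fill guarantee, exactly what the test's assertion loop verifies byte by byte. A condensed sketch of that check (`zeroed_roundtrip` is a hypothetical name):

```rust
#![feature(allocator_api, slice_ptr_get)]
use std::alloc::{AllocError, Allocator, Layout};

// `allocate_zeroed` behaves like `allocate`, but every byte of the
// returned block is guaranteed to be zero.
fn zeroed_roundtrip<A: Allocator>(a: &A) -> Result<(), AllocError> {
    let layout = Layout::from_size_align(64, 8).unwrap();
    let ptr = a.allocate_zeroed(layout)?;
    unsafe {
        for i in 0..layout.size() {
            assert_eq!(*ptr.as_mut_ptr().add(i), 0);
        }
        a.deallocate(ptr.as_non_null_ptr(), layout);
    }
    Ok(())
}
```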
diff --git a/wee_alloc/build.rs b/wee_alloc/build.rs
index 7a8dc1c..5c67d51 100644
--- a/wee_alloc/build.rs
+++ b/wee_alloc/build.rs
@@ -15,10 +15,14 @@ fn create_static_array_backend_size_bytes_file() {
let out_dir = env::var("OUT_DIR").expect("OUT_DIR environment variable not provided");
let dest_path = Path::new(&out_dir).join("wee_alloc_static_array_backend_size_bytes.txt");
let size: u32 = match env::var(WEE_ALLOC_STATIC_ARRAY_BACKEND_BYTES) {
- Ok(s) => s.parse().expect("Could not interpret WEE_ALLOC_STATIC_ARRAY_BACKEND_BYTES as a 32 bit unsigned integer"),
+ Ok(s) => s.parse().expect(
+ "Could not interpret WEE_ALLOC_STATIC_ARRAY_BACKEND_BYTES as a 32 bit unsigned integer",
+ ),
Err(ve) => match ve {
- VarError::NotPresent => { DEFAULT_STATIC_ARRAY_BACKEND_SIZE_BYTES },
- VarError::NotUnicode(_) => { panic!("Could not interpret WEE_ALLOC_STATIC_ARRAY_BACKEND_BYTES as a string representing a 32 bit unsigned integer")},
+ VarError::NotPresent => DEFAULT_STATIC_ARRAY_BACKEND_SIZE_BYTES,
+ VarError::NotUnicode(_) => {
+ panic!("Could not interpret WEE_ALLOC_STATIC_ARRAY_BACKEND_BYTES as a string representing a 32 bit unsigned integer")
+ }
},
};
let mut f = File::create(&dest_path)
diff --git a/wee_alloc/src/imp_static_array.rs b/wee_alloc/src/imp_static_array.rs
index 0b98784..8e4b65b 100644
--- a/wee_alloc/src/imp_static_array.rs
+++ b/wee_alloc/src/imp_static_array.rs
@@ -1,4 +1,4 @@
-use super::AllocErr;
+use super::AllocError;
use const_init::ConstInit;
#[cfg(feature = "extra_assertions")]
use core::cell::Cell;
@@ -17,16 +17,16 @@ struct ScratchHeap([u8; SCRATCH_LEN_BYTES]);
static mut SCRATCH_HEAP: ScratchHeap = ScratchHeap([0; SCRATCH_LEN_BYTES]);
static mut OFFSET: Mutex<usize> = Mutex::new(0);
-pub(crate) unsafe fn alloc_pages(pages: Pages) -> Result<NonNull<u8>, AllocErr> {
+pub(crate) unsafe fn alloc_pages(pages: Pages) -> Result<NonNull<u8>, AllocError> {
let bytes: Bytes = pages.into();
let mut offset = OFFSET.lock();
- let end = bytes.0.checked_add(*offset).ok_or(AllocErr)?;
+ let end = bytes.0.checked_add(*offset).ok_or(AllocError)?;
if end < SCRATCH_LEN_BYTES {
let ptr = SCRATCH_HEAP.0[*offset..end].as_mut_ptr() as *mut u8;
*offset = end;
- NonNull::new(ptr).ok_or_else(|| AllocErr)
+ NonNull::new(ptr).ok_or_else(|| AllocError)
} else {
- Err(AllocErr)
+ Err(AllocError)
}
}
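In the four backend files the change is purely the upstream rename of `core::alloc::AllocErr` to `AllocError`, which the crate's non-`nightly` stand-in struct follows (see the lib.rs hunk below). The static-array backend itself is a simple bump allocator over a fixed scratch buffer. Its core logic as a standalone sketch, with `bump` and the unit `AllocError` stand-in both hypothetical:

```rust
// Hypothetical stand-in for the crate-private error type.
pub struct AllocError;

// Reserve `bytes` from an arena of `arena_len` bytes by advancing
// `offset`, failing once the arena is exhausted (mirrors the hunk above).
fn bump(offset: &mut usize, bytes: usize, arena_len: usize) -> Result<usize, AllocError> {
    let end = bytes.checked_add(*offset).ok_or(AllocError)?;
    if end < arena_len {
        let start = *offset;
        *offset = end;
        Ok(start)
    } else {
        Err(AllocError)
    }
}
```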
diff --git a/wee_alloc/src/imp_unix.rs b/wee_alloc/src/imp_unix.rs
index e2c6383..6dcd086 100644
--- a/wee_alloc/src/imp_unix.rs
+++ b/wee_alloc/src/imp_unix.rs
@@ -1,11 +1,11 @@
-use super::AllocErr;
+use super::AllocError;
use const_init::ConstInit;
use core::cell::UnsafeCell;
use core::ptr;
use libc;
use memory_units::{Bytes, Pages};
-pub(crate) fn alloc_pages(pages: Pages) -> Result<ptr::NonNull<u8>, AllocErr> {
+pub(crate) fn alloc_pages(pages: Pages) -> Result<ptr::NonNull<u8>, AllocError> {
unsafe {
let bytes: Bytes = pages.into();
let addr = libc::mmap(
@@ -17,9 +17,9 @@ pub(crate) fn alloc_pages(pages: Pages) -> Result<ptr::NonNull<u8>, AllocErr> {
0,
);
if addr == libc::MAP_FAILED {
- Err(AllocErr)
+ Err(AllocError)
} else {
- ptr::NonNull::new(addr as *mut u8).ok_or(AllocErr)
+ ptr::NonNull::new(addr as *mut u8).ok_or(AllocError)
}
}
}
diff --git a/wee_alloc/src/imp_wasm32.rs b/wee_alloc/src/imp_wasm32.rs
index 12bc8ad..21bb1f6 100644
--- a/wee_alloc/src/imp_wasm32.rs
+++ b/wee_alloc/src/imp_wasm32.rs
@@ -1,19 +1,19 @@
-use super::{assert_is_word_aligned, PAGE_SIZE, unchecked_unwrap};
+use super::AllocError;
+use super::{assert_is_word_aligned, unchecked_unwrap, PAGE_SIZE};
use const_init::ConstInit;
-use super::AllocErr;
use core::arch::wasm32;
use core::cell::UnsafeCell;
use core::ptr::NonNull;
use memory_units::Pages;
-pub(crate) unsafe fn alloc_pages(n: Pages) -> Result<NonNull<u8>, AllocErr> {
+pub(crate) unsafe fn alloc_pages(n: Pages) -> Result<NonNull<u8>, AllocError> {
let ptr = wasm32::memory_grow(0, n.0);
if ptr != usize::max_value() {
let ptr = (ptr * PAGE_SIZE.0) as *mut u8;
assert_is_word_aligned(ptr as *mut u8);
Ok(unchecked_unwrap(NonNull::new(ptr)))
} else {
- Err(AllocErr)
+ Err(AllocError)
}
}
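For reference, the wasm32 backend grows the module's linear memory directly: `memory_grow(0, n)` asks memory 0 for `n` more 64 KiB pages and returns the previous size in pages, or `usize::MAX` on failure. A standalone sketch of that conversion (`grow_linear_memory` is a hypothetical name):

```rust
// Grow linear memory by `delta` pages and return the base address of the
// newly mapped region, or None if the engine refuses to grow.
#[cfg(target_arch = "wasm32")]
fn grow_linear_memory(delta: usize) -> Option<*mut u8> {
    const PAGE_SIZE: usize = 65536; // WebAssembly page size in bytes
    let prev_pages = core::arch::wasm32::memory_grow(0, delta);
    if prev_pages == usize::MAX {
        None
    } else {
        Some((prev_pages * PAGE_SIZE) as *mut u8)
    }
}
```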
diff --git a/wee_alloc/src/imp_windows.rs b/wee_alloc/src/imp_windows.rs
index 8114eab..bd0db08 100644
--- a/wee_alloc/src/imp_windows.rs
+++ b/wee_alloc/src/imp_windows.rs
@@ -1,5 +1,5 @@
+use super::AllocError;
use const_init::ConstInit;
-use super::AllocErr;
use core::cell::UnsafeCell;
use core::ptr::NonNull;
use memory_units::{Bytes, Pages};
@@ -7,14 +7,14 @@ use memory_units::{Bytes, Pages};
use winapi::shared::ntdef::NULL;
use winapi::um::memoryapi::VirtualAlloc;
use winapi::um::synchapi::{
- SRWLOCK, SRWLOCK_INIT, AcquireSRWLockExclusive, ReleaseSRWLockExclusive,
+ AcquireSRWLockExclusive, ReleaseSRWLockExclusive, SRWLOCK, SRWLOCK_INIT,
};
use winapi::um::winnt::{MEM_COMMIT, PAGE_READWRITE};
-pub(crate) fn alloc_pages(pages: Pages) -> Result<NonNull<u8>, AllocErr> {
+pub(crate) fn alloc_pages(pages: Pages) -> Result<NonNull<u8>, AllocError> {
let bytes: Bytes = pages.into();
let ptr = unsafe { VirtualAlloc(NULL, bytes.0, MEM_COMMIT, PAGE_READWRITE) };
- NonNull::new(ptr as *mut u8).ok_or(AllocErr)
+ NonNull::new(ptr as *mut u8).ok_or(AllocError)
}
// Align to the cache line size on an i7 to avoid false sharing.
diff --git a/wee_alloc/src/lib.rs b/wee_alloc/src/lib.rs
index f4aff45..18598ee 100644
--- a/wee_alloc/src/lib.rs
+++ b/wee_alloc/src/lib.rs
@@ -65,7 +65,7 @@ static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;
nightly Rust.
- **nightly**: Enable usage of nightly-only Rust features, such as implementing
- the `Alloc` trait (not to be confused with the stable `GlobalAlloc` trait!)
+ the `Allocator` trait (not to be confused with the stable `GlobalAlloc` trait!)
## Implementation Notes and Constraints
@@ -218,9 +218,9 @@ mod size_classes;
cfg_if! {
if #[cfg(feature = "nightly")] {
- use core::alloc::{Alloc, AllocErr};
+ use core::alloc::{Allocator, AllocError};
} else {
- pub(crate) struct AllocErr;
+ pub(crate) struct AllocError;
}
}
@@ -231,12 +231,33 @@ use core::cmp;
use core::marker::Sync;
use core::mem;
use core::ptr::{self, NonNull};
+use core::slice;
use memory_units::{size_of, ByteSize, Bytes, Pages, RoundUpTo, Words};
use neighbors::Neighbors;
/// The WebAssembly page size, in bytes.
pub const PAGE_SIZE: Bytes = Bytes(65536);
+// TODO: replace with NonNull::slice_from_raw_parts once stable
+#[inline]
+fn nonnull_slice_from_raw_parts<T>(data: NonNull<T>, len: usize) -> NonNull<[T]> {
+ unsafe { NonNull::new_unchecked(&mut *slice::from_raw_parts_mut(data.as_ptr(), len)) }
+}
+
+// TODO: replace with ptr.as_non_null_ptr() once stabilized
+#[inline]
+const fn nonnull_slice_as_non_null_ptr<T>(ptr: NonNull<[T]>) -> NonNull<T> {
+ let mut_slice: *mut [T] = ptr.as_ptr();
+ let buff_ptr: *mut T = mut_slice as *mut T;
+ unsafe { NonNull::new_unchecked(buff_ptr) }
+}
+
+// TODO: replace with ptr.as_mut_ptr() once stabilized
+#[inline]
+const fn nonnull_slice_as_mut_ptr<T>(ptr: NonNull<[T]>) -> *mut T {
+ nonnull_slice_as_non_null_ptr(ptr).as_ptr()
+}
+
extra_only! {
fn assert_is_word_aligned<T>(ptr: *const T) {
assert_aligned_to(ptr, size_of::<usize>());
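The three `nonnull_slice_*` helpers added in this hunk are local polyfills for `NonNull` slice APIs that were still unstable when this patch was written (`NonNull::slice_from_raw_parts`, `NonNull::<[T]>::as_non_null_ptr`, and `NonNull::<[T]>::as_mut_ptr`), as their TODO comments note. A usage sketch, assuming the helpers are in scope within the crate (`widen_then_narrow` is a hypothetical name):

```rust
use core::ptr::NonNull;

// Widen a thin pointer into a fat slice pointer of length `len`, then
// recover the thin pointer to the first element.
fn widen_then_narrow(data: NonNull<u8>, len: usize) -> NonNull<u8> {
    let fat: NonNull<[u8]> = nonnull_slice_from_raw_parts(data, len);
    nonnull_slice_as_non_null_ptr(fat)
}
```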
@@ -529,7 +550,6 @@ impl<'a> FreeCell<'a> {
policy: &dyn AllocPolicy<'a>,
) -> Option<&'b AllocatedCell<'a>> {
extra_assert!(alloc_size.0 > 0);
- extra_assert!(align.0 > 0);
extra_assert!(align.0.is_power_of_two());
// First, do a quick check that this cell can hold an allocation of the
@@ -759,7 +779,7 @@ trait AllocPolicy<'a> {
&self,
size: Words,
align: Bytes,
- ) -> Result<*const FreeCell<'a>, AllocErr>;
+ ) -> Result<*const FreeCell<'a>, AllocError>;
fn min_cell_size(&self, alloc_size: Words) -> Words;
@@ -785,7 +805,7 @@ impl<'a> AllocPolicy<'a> for LargeAllocPolicy {
&self,
size: Words,
align: Bytes,
- ) -> Result<*const FreeCell<'a>, AllocErr> {
+ ) -> Result<*const FreeCell<'a>, AllocError> {
// To assure that an allocation will always succeed after refilling the
// free list with this new cell, make sure that we allocate enough to
// fulfill the requested alignment, and still have the minimum cell size
@@ -846,7 +866,7 @@ unsafe fn walk_free_list<'a, F, T>(
head: &Cell<*const FreeCell<'a>>,
policy: &dyn AllocPolicy<'a>,
mut f: F,
-) -> Result<T, AllocErr>
+) -> Result<T, AllocError>
where
F: FnMut(&Cell<*const FreeCell<'a>>, &FreeCell<'a>) -> Option<T>,
{
@@ -859,7 +879,7 @@ where
assert_local_cell_invariants(&(*current_free).header);
if current_free.is_null() {
- return Err(AllocErr);
+ return Err(AllocError);
}
let current_free = Cell::new(current_free);
@@ -914,7 +934,7 @@ unsafe fn alloc_first_fit<'a>(
align: Bytes,
head: &Cell<*const FreeCell<'a>>,
policy: &dyn AllocPolicy<'a>,
-) -> Result<NonNull<u8>, AllocErr> {
+) -> Result<NonNull<[u8]>, AllocError> {
extra_assert!(size.0 > 0);
walk_free_list(head, policy, |previous, current| {
@@ -922,7 +942,9 @@ unsafe fn alloc_first_fit<'a>(
if let Some(allocated) = current.try_alloc(previous, size, align, policy) {
assert_aligned_to(allocated.data(), align);
- return Some(unchecked_unwrap(NonNull::new(allocated.data() as *mut u8)));
+ let ptr = unchecked_unwrap(NonNull::new(allocated.data() as *mut u8));
+ let slice_len: Bytes = size.into();
+ return Some(nonnull_slice_from_raw_parts(ptr, slice_len.0));
}
None
@@ -934,7 +956,7 @@ unsafe fn alloc_with_refill<'a, 'b>(
align: Bytes,
head: &'b Cell<*const FreeCell<'a>>,
policy: &dyn AllocPolicy<'a>,
-) -> Result<NonNull<u8>, AllocErr> {
+) -> Result<NonNull<[u8]>, AllocError> {
if let Ok(result) = alloc_first_fit(size, align, head, policy) {
return Ok(result);
}
@@ -988,7 +1010,7 @@ impl<'a> WeeAlloc<'a> {
F: for<'b> FnOnce(&'b Cell<*const FreeCell<'a>>, &'b dyn AllocPolicy<'a>) -> T,
{
extra_assert!(size.0 > 0);
- extra_assert!(align.0 > 0);
+ extra_assert!(align.0.is_power_of_two());
if align <= size_of::<usize>() {
if let Some(head) = self.size_classes.get(size) {
@@ -1027,22 +1049,19 @@ impl<'a> WeeAlloc<'a> {
})
}
- unsafe fn alloc_impl(&self, layout: Layout) -> Result<NonNull<u8>, AllocErr> {
+ unsafe fn alloc_impl(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
let size = Bytes(layout.size());
- let align = if layout.align() == 0 {
- Bytes(1)
- } else {
- Bytes(layout.align())
- };
+ let align = Bytes(layout.align());
+ extra_assert!(align.0.is_power_of_two());
if size.0 == 0 {
// Ensure that our made up pointer is properly aligned by using the
// alignment as the pointer.
- extra_assert!(align.0 > 0);
- return Ok(NonNull::new_unchecked(align.0 as *mut u8));
+ let ptr = NonNull::new_unchecked(align.0 as *mut u8);
+ return Ok(nonnull_slice_from_raw_parts(ptr, 0));
}
- let word_size: Words = checked_round_up_to(size).ok_or(AllocErr)?;
+ let word_size: Words = checked_round_up_to(size).ok_or(AllocError)?;
self.with_free_list_and_policy_for_size(word_size, align, |head, policy| {
assert_is_valid_free_list(head.get(), policy);
@@ -1142,15 +1161,15 @@ impl<'a> WeeAlloc<'a> {
}
#[cfg(feature = "nightly")]
-unsafe impl<'a, 'b> Alloc for &'b WeeAlloc<'a>
+unsafe impl<'a, 'b> Allocator for &'b WeeAlloc<'a>
where
'a: 'b,
{
- unsafe fn alloc(&mut self, layout: Layout) -> Result<NonNull<u8>, AllocErr> {
- self.alloc_impl(layout)
+ fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+ unsafe { self.alloc_impl(layout) }
}
- unsafe fn dealloc(&mut self, ptr: NonNull<u8>, layout: Layout) {
+ unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
self.dealloc_impl(ptr, layout)
}
}
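Note that the impl is for `&WeeAlloc`, not `WeeAlloc` itself, so a shared reference is the allocator handle, and because `allocate` is now a safe trait method, the call into `alloc_impl` sits inside its own `unsafe` block. With the `nightly` feature enabled this lets `WeeAlloc` feed nightly's allocator-aware collections; a hedged sketch (`collect_into_wee` is a hypothetical name):

```rust
#![feature(allocator_api)]

// A shared reference to a static WeeAlloc acts as the allocator handle
// for allocator-aware collections (nightly toolchain plus the crate's
// `nightly` feature assumed).
static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;

fn collect_into_wee() -> Vec<u32, &'static wee_alloc::WeeAlloc<'static>> {
    let mut v = Vec::new_in(&ALLOC);
    v.extend([1, 2, 3]);
    v
}
```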
@@ -1158,8 +1177,8 @@ where
unsafe impl GlobalAlloc for WeeAlloc<'static> {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
match self.alloc_impl(layout) {
- Ok(ptr) => ptr.as_ptr(),
- Err(AllocErr) => ptr::null_mut(),
+ Ok(ptr) => nonnull_slice_as_mut_ptr(ptr),
+ Err(AllocError) => ptr::null_mut(),
}
}
diff --git a/wee_alloc/src/size_classes.rs b/wee_alloc/src/size_classes.rs
index 6ff44e2..2775863 100644
--- a/wee_alloc/src/size_classes.rs
+++ b/wee_alloc/src/size_classes.rs
@@ -1,4 +1,7 @@
-use super::{alloc_with_refill, AllocErr, AllocPolicy, CellHeader, FreeCell, LargeAllocPolicy};
+use super::{
+ alloc_with_refill, nonnull_slice_as_mut_ptr, nonnull_slice_as_non_null_ptr, AllocError,
+ AllocPolicy, CellHeader, FreeCell, LargeAllocPolicy,
+};
use const_init::ConstInit;
use core::cell::Cell;
use core::cmp;
@@ -40,8 +43,7 @@ where
&self,
size: Words,
align: Bytes,
- ) -> Result<*const FreeCell<'a>, AllocErr> {
- extra_assert!(align.0 > 0);
+ ) -> Result<*const FreeCell<'a>, AllocError> {
extra_assert!(align.0.is_power_of_two());
extra_assert!(align <= size_of::<usize>());
@@ -68,12 +70,13 @@ where
let new_cell_size: Bytes = new_cell_size.into();
let free_cell = FreeCell::from_uninitialized(
- new_cell,
+ nonnull_slice_as_non_null_ptr(new_cell),
new_cell_size - size_of::<CellHeader>(),
None,
self as &dyn AllocPolicy,
);
- let next_cell = (new_cell.as_ptr() as *const u8).offset(new_cell_size.0 as isize);
+ let next_cell =
+ (nonnull_slice_as_mut_ptr(new_cell) as *const u8).offset(new_cell_size.0 as isize);
(*free_cell)
.header
.neighbors