diff --git a/.appveyor.yml b/.appveyor.yml
index f7d3f990ac..4e8b8cd4b8 100644
--- a/.appveyor.yml
+++ b/.appveyor.yml
@@ -29,8 +29,6 @@ install:
     - rustc --version
 
 build_script:
-    - set RUST_TEST_NOCAPTURE=1
-    - set RUST_BACKTRACE=1
     - set RUSTFLAGS=-C debug-assertions
     # Build and install miri
     - cargo build --release --all-features --all-targets
@@ -40,6 +38,8 @@ build_script:
     - set MIRI_SYSROOT=%USERPROFILE%\AppData\Local\rust-lang\miri\cache\HOST
 
 test_script:
+    - set RUST_TEST_NOCAPTURE=1
+    - set RUST_BACKTRACE=1
    # Test miri
     - cargo test --release --all-features
     # Test cargo integration
diff --git a/README.md b/README.md
index 91dcfd7cf8..be104ed0b2 100644
--- a/README.md
+++ b/README.md
@@ -333,6 +333,7 @@ Definite bugs found:
 * [Futures turning a shared reference into a mutable one](https://github.com/rust-lang/rust/pull/56319)
 * [`str` turning a shared reference into a mutable one](https://github.com/rust-lang/rust/pull/58200)
 * [`rand` performing unaligned reads](https://github.com/rust-random/rand/issues/779)
+* [The Unix allocator calling `posix_memalign` in an invalid way](https://github.com/rust-lang/rust/issues/62251)
 
 Violations of Stacked Borrows found that are likely bugs (but Stacked Borrows is
 currently just an experiment):
diff --git a/src/intptrcast.rs b/src/intptrcast.rs
index f8102642bd..5480700005 100644
--- a/src/intptrcast.rs
+++ b/src/intptrcast.rs
@@ -90,6 +90,10 @@ impl<'mir, 'tcx> GlobalState {
                 // From next_base_addr + slack, round up to adjust for alignment.
                 let base_addr = Self::align_addr(global_state.next_base_addr + slack, align.bytes());
                 entry.insert(base_addr);
+                trace!(
+                    "Assigning base address {:#x} to allocation {:?} (slack: {}, align: {})",
+                    base_addr, ptr.alloc_id, slack, align.bytes(),
+                );
 
                 // Remember next base address. If this allocation is zero-sized, leave a gap
                 // of at least 1 to avoid two allocations having the same base address.
diff --git a/src/shims/foreign_items.rs b/src/shims/foreign_items.rs
index 9c9e77abfe..1a39df9cce 100644
--- a/src/shims/foreign_items.rs
+++ b/src/shims/foreign_items.rs
@@ -51,6 +51,18 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         Ok(Some(this.load_mir(instance.def)?))
     }
 
+    /// Returns the minimum alignment for the target architecture.
+    fn min_align(&self) -> Align {
+        let this = self.eval_context_ref();
+        // List taken from `libstd/sys_common/alloc.rs`.
+        let min_align = match this.tcx.tcx.sess.target.target.arch.as_str() {
+            "x86" | "arm" | "mips" | "powerpc" | "powerpc64" | "asmjs" | "wasm32" => 8,
+            "x86_64" | "aarch64" | "mips64" | "s390x" | "sparc64" => 16,
+            arch => bug!("Unsupported target architecture: {}", arch),
+        };
+        Align::from_bytes(min_align).unwrap()
+    }
+
     fn malloc(
         &mut self,
         size: u64,
@@ -61,7 +73,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         if size == 0 {
             Scalar::from_int(0, this.pointer_size())
         } else {
-            let align = this.tcx.data_layout.pointer_align.abi;
+            let align = this.min_align();
             let ptr = this.memory_mut().allocate(Size::from_bytes(size), align, MiriMemoryKind::C.into());
             if zero_init {
                 // We just allocated this, the access cannot fail
@@ -94,7 +106,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         new_size: u64,
     ) -> InterpResult<'tcx, Scalar<Tag>> {
         let this = self.eval_context_mut();
-        let align = this.tcx.data_layout.pointer_align.abi;
+        let align = this.min_align();
         if old_ptr.is_null_ptr(this) {
             if new_size == 0 {
                 Ok(Scalar::from_int(0, this.pointer_size()))
@@ -191,12 +203,16 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
                 if !align.is_power_of_two() {
                     return err!(HeapAllocNonPowerOfTwoAlignment(align));
                 }
+                /*
+                FIXME: This check is disabled because rustc violates it.
+                See <https://github.com/rust-lang/rust/issues/62251>.
                 if align < this.pointer_size().bytes() {
                     return err!(MachineError(format!(
                         "posix_memalign: alignment must be at least the size of a pointer, but is {}",
                         align,
                     )));
                 }
+                */
                 if size == 0 {
                     this.write_null(ret.into())?;
                 } else {
diff --git a/tests/run-pass/heap_allocator.rs b/tests/run-pass/heap_allocator.rs
index b201f24e25..2f3a48f535 100644
--- a/tests/run-pass/heap_allocator.rs
+++ b/tests/run-pass/heap_allocator.rs
@@ -1,3 +1,4 @@
+// compile-flags: -Zmiri-seed=
 #![feature(allocator_api)]
 
 use std::ptr::NonNull;
@@ -5,47 +6,59 @@ use std::alloc::{Global, Alloc, Layout, System};
 use std::slice;
 
 fn check_alloc<T: Alloc>(mut allocator: T) { unsafe {
-    let layout = Layout::from_size_align(20, 4).unwrap();
-    let a = allocator.alloc(layout).unwrap();
-    allocator.dealloc(a, layout);
+    for &align in &[4, 8, 16, 32] {
+        let layout = Layout::from_size_align(20, align).unwrap();
 
-    let p1 = allocator.alloc_zeroed(layout).unwrap();
+        for _ in 0..32 {
+            let a = allocator.alloc(layout).unwrap();
+            assert_eq!(a.as_ptr() as usize % align, 0, "pointer is incorrectly aligned");
+            allocator.dealloc(a, layout);
+        }
+
+        let p1 = allocator.alloc_zeroed(layout).unwrap();
+        assert_eq!(p1.as_ptr() as usize % align, 0, "pointer is incorrectly aligned");
 
-    let p2 = allocator.realloc(p1, Layout::from_size_align(20, 4).unwrap(), 40).unwrap();
-    let slice = slice::from_raw_parts(p2.as_ptr(), 20);
-    assert_eq!(&slice, &[0_u8; 20]);
+        let p2 = allocator.realloc(p1, layout, 40).unwrap();
+        let layout = Layout::from_size_align(40, align).unwrap();
+        assert_eq!(p2.as_ptr() as usize % align, 0, "pointer is incorrectly aligned");
+        let slice = slice::from_raw_parts(p2.as_ptr(), 20);
+        assert_eq!(&slice, &[0_u8; 20]);
 
-    // old size == new size
-    let p3 = allocator.realloc(p2, Layout::from_size_align(40, 4).unwrap(), 40).unwrap();
-    let slice = slice::from_raw_parts(p3.as_ptr(), 20);
-    assert_eq!(&slice, &[0_u8; 20]);
+        // old size == new size
+        let p3 = allocator.realloc(p2, layout, 40).unwrap();
+        assert_eq!(p3.as_ptr() as usize % align, 0, "pointer is incorrectly aligned");
+        let slice = slice::from_raw_parts(p3.as_ptr(), 20);
+        assert_eq!(&slice, &[0_u8; 20]);
 
-    // old size > new size
-    let p4 = allocator.realloc(p3, Layout::from_size_align(40, 4).unwrap(), 10).unwrap();
-    let slice = slice::from_raw_parts(p4.as_ptr(), 10);
-    assert_eq!(&slice, &[0_u8; 10]);
+        // old size > new size
+        let p4 = allocator.realloc(p3, layout, 10).unwrap();
+        let layout = Layout::from_size_align(10, align).unwrap();
+        assert_eq!(p4.as_ptr() as usize % align, 0, "pointer is incorrectly aligned");
+        let slice = slice::from_raw_parts(p4.as_ptr(), 10);
+        assert_eq!(&slice, &[0_u8; 10]);
 
-    allocator.dealloc(p4, Layout::from_size_align(10, 4).unwrap());
+        allocator.dealloc(p4, layout);
+    }
 } }
 
 fn check_overalign_requests<T: Alloc>(mut allocator: T) {
-    let size = 8;
-    // Greater than `size`.
-    let align = 16;
-    // Miri is deterministic; no need to try many times.
-    let iterations = 1;
-    unsafe {
-        let pointers: Vec<_> = (0..iterations).map(|_| {
-            allocator.alloc(Layout::from_size_align(size, align).unwrap()).unwrap()
-        }).collect();
-        for &ptr in &pointers {
-            assert_eq!((ptr.as_ptr() as usize) % align, 0,
-                       "Got a pointer less aligned than requested")
-        }
+    for &size in &[2, 8, 64] { // size less than and bigger than alignment
+        for &align in &[4, 8, 16, 32] { // Be sure to cover less than and bigger than `MIN_ALIGN` for all architectures
+            let iterations = 32;
+            unsafe {
+                let pointers: Vec<_> = (0..iterations).map(|_| {
+                    allocator.alloc(Layout::from_size_align(size, align).unwrap()).unwrap()
+                }).collect();
+                for &ptr in &pointers {
+                    assert_eq!((ptr.as_ptr() as usize) % align, 0,
+                               "Got a pointer less aligned than requested")
+                }
 
-        // Clean up.
-        for &ptr in &pointers {
-            allocator.dealloc(ptr, Layout::from_size_align(size, align).unwrap())
+                // Clean up.
+                for &ptr in &pointers {
+                    allocator.dealloc(ptr, Layout::from_size_align(size, align).unwrap())
+                }
+            }
         }
     }
 }
@@ -75,7 +88,6 @@ fn box_to_global() {
 fn main() {
     check_alloc(System);
     check_alloc(Global);
-    #[cfg(not(target_os = "windows"))] // TODO: Inspects allocation base address on Windows; needs intptrcast model
     check_overalign_requests(System);
     check_overalign_requests(Global);
     global_to_box();
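Note (editor's illustration, not part of the patch): the check that is commented out in `posix_memalign` above encodes the POSIX contract this change is about: the requested alignment must be a power of two and at least `sizeof(void*)`, which rustc's Unix allocator was violating (rust-lang/rust#62251). A minimal sketch of calling `posix_memalign` within that contract from Rust, assuming the `libc` crate is available; the helper name `aligned_alloc_posix` is made up for the example.

use std::mem;
use std::ptr;

/// Allocate `size` bytes aligned to `align`, rounding `align` up to the
/// POSIX minimum of `sizeof(void*)` so the call stays valid.
fn aligned_alloc_posix(size: usize, align: usize) -> *mut u8 {
    let align = align.max(mem::size_of::<*mut libc::c_void>());
    assert!(align.is_power_of_two());
    let mut out: *mut libc::c_void = ptr::null_mut();
    // posix_memalign returns 0 on success; `out` is only valid in that case.
    let ret = unsafe { libc::posix_memalign(&mut out, align, size) };
    assert_eq!(ret, 0, "posix_memalign failed");
    out as *mut u8
}

fn main() {
    // An alignment of 4 would violate the POSIX rule on 64-bit targets,
    // so the helper rounds it up to pointer size before the call.
    let p = aligned_alloc_posix(20, 4);
    assert_eq!(p as usize % mem::size_of::<*mut libc::c_void>(), 0);
    unsafe { libc::free(p as *mut libc::c_void) };
}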