diff --git a/.travis.yml b/.travis.yml index 095af11627..dd7bcd4920 100644 --- a/.travis.yml +++ b/.travis.yml @@ -36,30 +36,34 @@ script: - | # Test and install plain miri cargo build --release --all-features && - cargo test --release --all-features && + #cargo test --release --all-features && cargo install --all-features --force --path . - | # get ourselves a MIR-full libstd xargo/build.sh && export MIRI_SYSROOT=~/.xargo/HOST +#- | +# # run all tests with full mir +# cargo test --release --all-features - | - # run all tests with full mir - cargo test --release --all-features -- | - # test `cargo miri` + # Test cargo integration cd cargo-miri-test && - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then - cargo miri -q - else - cargo miri -q >stdout.real 2>stderr.real && - cat stdout.real stderr.real && - # Test `cargo miri` output. Not on mac because output redirecting doesn't - # work. There is no error. It just stops CI. + # Test `cargo miri` + # We ignore the exit code because we want to see the output even on failure, and + # I found no way to preserve the exit code so that we can test for it later. + # Variables set in this subshell in the parentheses are not available + # on the outside. + # We assume that if this fails, it'll also print something about the failure on + # stdout/stderr and we'll catch that. + # FIXME: Disabling validation, still investigating whether there is UB here + (cargo miri -q >stdout.real 2>stderr.real -- -Zmiri-disable-validation || true) && + # Print file names and contents (`cat` would just print contents) + tail -n +0 stdout.real stderr.real && + # Verify output diff -u stdout.ref stdout.real && - diff -u stderr.ref stderr.real - fi && + diff -u stderr.ref stderr.real && # test `cargo miri test` - cargo miri test && + cargo miri test && cd .. notifications: diff --git a/rust-version b/rust-version index feed920a16..d025ddacf0 100644 --- a/rust-version +++ b/rust-version @@ -1 +1 @@ -nightly-2018-10-29 +nightly-2018-10-30 diff --git a/src/fn_call.rs b/src/fn_call.rs index 2d142ab20d..9e3f49ac9f 100644 --- a/src/fn_call.rs +++ b/src/fn_call.rs @@ -125,15 +125,15 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo self.write_null(dest)?; } else { let align = self.tcx.data_layout.pointer_align; - let ptr = self.memory.allocate(Size::from_bytes(size), align, MiriMemoryKind::C.into())?; - self.write_scalar(Scalar::Ptr(ptr), dest)?; + let ptr = self.memory_mut().allocate(Size::from_bytes(size), align, MiriMemoryKind::C.into())?; + self.write_scalar(Scalar::Ptr(ptr.with_default_tag()), dest)?; } } "free" => { let ptr = self.read_scalar(args[0])?.not_undef()?.erase_tag(); // raw ptr operation, no tag if !ptr.is_null_ptr(&self) { - self.memory.deallocate( + self.memory_mut().deallocate( ptr.to_ptr()?.with_default_tag(), None, MiriMemoryKind::C.into(), @@ -150,9 +150,13 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo if !align.is_power_of_two() { return err!(HeapAllocNonPowerOfTwoAlignment(align)); } - let ptr = self.memory.allocate(Size::from_bytes(size), - Align::from_bytes(align, align).unwrap(), - MiriMemoryKind::Rust.into())?; + let ptr = self.memory_mut() + .allocate( + Size::from_bytes(size), + Align::from_bytes(align, align).unwrap(), + MiriMemoryKind::Rust.into() + )?
+ .with_default_tag(); self.write_scalar(Scalar::Ptr(ptr), dest)?; } "__rust_alloc_zeroed" => { @@ -164,10 +168,14 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo if !align.is_power_of_two() { return err!(HeapAllocNonPowerOfTwoAlignment(align)); } - let ptr = self.memory.allocate(Size::from_bytes(size), - Align::from_bytes(align, align).unwrap(), - MiriMemoryKind::Rust.into())?; - self.memory.write_repeat(ptr.into(), 0, Size::from_bytes(size))?; + let ptr = self.memory_mut() + .allocate( + Size::from_bytes(size), + Align::from_bytes(align, align).unwrap(), + MiriMemoryKind::Rust.into() + )? + .with_default_tag(); + self.memory_mut().write_repeat(ptr.into(), 0, Size::from_bytes(size))?; self.write_scalar(Scalar::Ptr(ptr), dest)?; } "__rust_dealloc" => { @@ -180,7 +188,7 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo if !align.is_power_of_two() { return err!(HeapAllocNonPowerOfTwoAlignment(align)); } - self.memory.deallocate( + self.memory_mut().deallocate( ptr.with_default_tag(), Some((Size::from_bytes(old_size), Align::from_bytes(align, align).unwrap())), MiriMemoryKind::Rust.into(), @@ -197,7 +205,7 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo if !align.is_power_of_two() { return err!(HeapAllocNonPowerOfTwoAlignment(align)); } - let new_ptr = self.memory.reallocate( + let new_ptr = self.memory_mut().reallocate( ptr.with_default_tag(), Size::from_bytes(old_size), Align::from_bytes(align, align).unwrap(), @@ -205,7 +213,7 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo Align::from_bytes(align, align).unwrap(), MiriMemoryKind::Rust.into(), )?; - self.write_scalar(Scalar::Ptr(new_ptr), dest)?; + self.write_scalar(Scalar::Ptr(new_ptr.with_default_tag()), dest)?; } "syscall" => { @@ -231,7 +239,7 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo "dlsym" => { let _handle = self.read_scalar(args[0])?; let symbol = self.read_scalar(args[1])?.to_ptr()?.erase_tag(); - let symbol_name = self.memory.read_c_str(symbol.with_default_tag())?; + let symbol_name = self.memory().read_c_str(symbol.with_default_tag())?; let err = format!("bad c unicode symbol: {:?}", symbol_name); let symbol_name = ::std::str::from_utf8(symbol_name).unwrap_or(&err); return err!(Unimplemented(format!( @@ -245,7 +253,7 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo // We abort on panic, so not much is going on here, but we still have to call the closure let f = self.read_scalar(args[0])?.to_ptr()?; let data = self.read_scalar(args[1])?.not_undef()?; - let f_instance = self.memory.get_fn(f)?; + let f_instance = self.memory().get_fn(f)?; self.write_null(dest)?; trace!("__rust_maybe_catch_panic: {:?}", f_instance); @@ -289,8 +297,8 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo let n = Size::from_bytes(self.read_scalar(args[2])?.to_usize(&self)?); let result = { - let left_bytes = self.memory.read_bytes(left.with_default_tag(), n)?; - let right_bytes = self.memory.read_bytes(right.with_default_tag(), n)?; + let left_bytes = self.memory().read_bytes(left.with_default_tag(), n)?; + let right_bytes = self.memory().read_bytes(right.with_default_tag(), n)?; use std::cmp::Ordering::*; match left_bytes.cmp(right_bytes) { @@ -311,7 +319,7 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo let ptr = ptr.with_default_tag(); let val = 
self.read_scalar(args[1])?.to_bytes()? as u8; let num = self.read_scalar(args[2])?.to_usize(&self)?; - if let Some(idx) = self.memory.read_bytes(ptr, Size::from_bytes(num))? + if let Some(idx) = self.memory().read_bytes(ptr, Size::from_bytes(num))? .iter().rev().position(|&c| c == val) { let new_ptr = ptr.ptr_offset(Size::from_bytes(num - idx as u64 - 1), &self)?; @@ -326,7 +334,7 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo let ptr = ptr.with_default_tag(); let val = self.read_scalar(args[1])?.to_bytes()? as u8; let num = self.read_scalar(args[2])?.to_usize(&self)?; - if let Some(idx) = self.memory.read_bytes(ptr, Size::from_bytes(num))?.iter().position( + if let Some(idx) = self.memory().read_bytes(ptr, Size::from_bytes(num))?.iter().position( |&c| c == val, ) { @@ -340,7 +348,7 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo "getenv" => { let result = { let name_ptr = self.read_scalar(args[0])?.to_ptr()?.erase_tag(); // raw ptr operation - let name = self.memory.read_c_str(name_ptr.with_default_tag())?; + let name = self.memory().read_c_str(name_ptr.with_default_tag())?; match self.machine.env_vars.get(name) { Some(&var) => Scalar::Ptr(var), None => Scalar::ptr_null(*self.tcx), @@ -354,15 +362,16 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo { let name_ptr = self.read_scalar(args[0])?.not_undef()?.erase_tag(); // raw ptr operation if !name_ptr.is_null_ptr(&self) { - let name = self.memory.read_c_str(name_ptr.to_ptr()?.with_default_tag())?; + let name = self.memory().read_c_str(name_ptr.to_ptr()? + .with_default_tag())?.to_owned(); if !name.is_empty() && !name.contains(&b'=') { - success = Some(self.machine.env_vars.remove(name)); + success = Some(self.machine.env_vars.remove(&name)); } } } if let Some(old) = success { if let Some(var) = old { - self.memory.deallocate(var, None, MiriMemoryKind::Env.into())?; + self.memory_mut().deallocate(var, None, MiriMemoryKind::Env.into())?; } self.write_null(dest)?; } else { @@ -375,9 +384,9 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo { let name_ptr = self.read_scalar(args[0])?.not_undef()?.erase_tag(); // raw ptr operation let value_ptr = self.read_scalar(args[1])?.to_ptr()?.erase_tag(); // raw ptr operation - let value = self.memory.read_c_str(value_ptr.with_default_tag())?; + let value = self.memory().read_c_str(value_ptr.with_default_tag())?; if !name_ptr.is_null_ptr(&self) { - let name = self.memory.read_c_str(name_ptr.to_ptr()?.with_default_tag())?; + let name = self.memory().read_c_str(name_ptr.to_ptr()?.with_default_tag())?; if !name.is_empty() && !name.contains(&b'=') { new = Some((name.to_owned(), value.to_owned())); } @@ -385,20 +394,20 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo } if let Some((name, value)) = new { // +1 for the null terminator - let value_copy = self.memory.allocate( + let value_copy = self.memory_mut().allocate( Size::from_bytes((value.len() + 1) as u64), Align::from_bytes(1, 1).unwrap(), MiriMemoryKind::Env.into(), - )?; - self.memory.write_bytes(value_copy.into(), &value)?; + )?.with_default_tag(); + self.memory_mut().write_bytes(value_copy.into(), &value)?; let trailing_zero_ptr = value_copy.offset(Size::from_bytes(value.len() as u64), &self)?.into(); - self.memory.write_bytes(trailing_zero_ptr, &[0])?; + self.memory_mut().write_bytes(trailing_zero_ptr, &[0])?; if let Some(var) = self.machine.env_vars.insert( name.to_owned(), 
value_copy, ) { - self.memory.deallocate(var, None, MiriMemoryKind::Env.into())?; + self.memory_mut().deallocate(var, None, MiriMemoryKind::Env.into())?; } self.write_null(dest)?; } else { @@ -415,7 +424,7 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo // stdout/stderr use std::io::{self, Write}; - let buf_cont = self.memory.read_bytes(buf.with_default_tag(), Size::from_bytes(n))?; + let buf_cont = self.memory().read_bytes(buf.with_default_tag(), Size::from_bytes(n))?; let res = if fd == 1 { io::stdout().write(buf_cont) } else { @@ -437,7 +446,7 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo "strlen" => { let ptr = self.read_scalar(args[0])?.to_ptr()?.erase_tag(); - let n = self.memory.read_c_str(ptr.with_default_tag())?.len(); + let n = self.memory().read_c_str(ptr.with_default_tag())?.len(); self.write_scalar(Scalar::from_uint(n as u64, dest.layout.size), dest)?; } @@ -487,9 +496,9 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo // Extract the function type out of the signature (that seems easier than constructing it ourselves...) let dtor = match self.read_scalar(args[1])?.not_undef()? { - Scalar::Ptr(dtor_ptr) => Some(self.memory.get_fn(dtor_ptr)?), + Scalar::Ptr(dtor_ptr) => Some(self.memory().get_fn(dtor_ptr)?), Scalar::Bits { bits: 0, size } => { - assert_eq!(size as u64, self.memory.pointer_size().bytes()); + assert_eq!(size as u64, self.memory().pointer_size().bytes()); None }, Scalar::Bits { .. } => return err!(ReadBytesAsPointer), @@ -505,7 +514,7 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo if key_layout.size.bits() < 128 && key >= (1u128 << key_layout.size.bits() as u128) { return err!(OutOfTls); } - self.memory.write_scalar( + self.memory_mut().write_scalar( key_ptr.with_default_tag(), key_layout.align, Scalar::from_uint(key, key_layout.size).into(), diff --git a/src/intrinsic.rs b/src/intrinsic.rs index dceeaf89aa..ee8df3e25e 100644 --- a/src/intrinsic.rs +++ b/src/intrinsic.rs @@ -153,7 +153,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, ' // erase tags: this is a raw ptr operation let src = self.read_scalar(args[0])?.not_undef()?.erase_tag(); let dest = self.read_scalar(args[1])?.not_undef()?.erase_tag(); - self.memory.copy( + self.memory_mut().copy( src.with_default_tag(), elem_align, dest.with_default_tag(), @@ -260,7 +260,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, ' // Do it in memory let mplace = self.force_allocation(dest)?; assert!(mplace.meta.is_none()); - self.memory.write_repeat(mplace.ptr, 0, dest.layout.size)?; + self.memory_mut().write_repeat(mplace.ptr, 0, dest.layout.size)?; } } } @@ -423,7 +423,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, ' // Do it in memory let mplace = self.force_allocation(dest)?; assert!(mplace.meta.is_none()); - self.memory.mark_definedness(mplace.ptr.to_ptr()?, dest.layout.size, false)?; + self.memory_mut().mark_definedness(mplace.ptr.to_ptr()?, dest.layout.size, false)?; } } } @@ -435,8 +435,8 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, ' let val_byte = self.read_scalar(args[1])?.to_u8()?; let ptr = self.read_scalar(args[0])?.not_undef()?.erase_tag().with_default_tag(); let count = self.read_scalar(args[2])?.to_usize(&self)?; - self.memory.check_align(ptr, ty_layout.align)?; - self.memory.write_repeat(ptr, val_byte, ty_layout.size * 
count)?; + self.memory().check_align(ptr, ty_layout.align)?; + self.memory_mut().write_repeat(ptr, val_byte, ty_layout.size * count)?; } name => return err!(Unimplemented(format!("unimplemented intrinsic: {}", name))), diff --git a/src/lib.rs b/src/lib.rs index a93a48cb09..9f4eb00877 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -18,7 +18,7 @@ use std::borrow::Cow; use rustc::ty::{self, Ty, TyCtxt, query::TyCtxtAt}; use rustc::ty::layout::{TyLayout, LayoutOf, Size}; -use rustc::hir::def_id::DefId; +use rustc::hir::{self, def_id::DefId}; use rustc::mir; use syntax::attr; @@ -44,8 +44,12 @@ use range_map::RangeMap; #[allow(unused_imports)] // FIXME rustc bug https://github.com/rust-lang/rust/issues/53682 use helpers::{ScalarExt, EvalContextExt as HelpersEvalContextExt}; use mono_hash_map::MonoHashMap; -use stacked_borrows::{EvalContextExt as StackedBorEvalContextExt, Borrow}; +use stacked_borrows::{EvalContextExt as StackedBorEvalContextExt}; +// Used by priroda +pub use stacked_borrows::{Borrow, Stacks, Mut as MutBorrow}; + +// Used by priroda pub fn create_ecx<'a, 'mir: 'a, 'tcx: 'mir>( tcx: TyCtxt<'a, 'tcx, 'tcx>, main_id: DefId, @@ -108,7 +112,7 @@ pub fn create_ecx<'a, 'mir: 'a, 'tcx: 'mir>( let mut args = ecx.frame().mir.args_iter(); // First argument: pointer to main() - let main_ptr = ecx.memory_mut().create_fn_alloc(main_instance); + let main_ptr = ecx.memory_mut().create_fn_alloc(main_instance).with_default_tag(); let dest = ecx.eval_place(&mir::Place::Local(args.next().unwrap()))?; ecx.write_scalar(Scalar::Ptr(main_ptr), dest)?; @@ -119,12 +123,12 @@ pub fn create_ecx<'a, 'mir: 'a, 'tcx: 'mir>( // FIXME: extract main source file path // Third argument (argv): &[b"foo"] let dest = ecx.eval_place(&mir::Place::Local(args.next().unwrap()))?; - let foo = ecx.memory.allocate_static_bytes(b"foo\0"); + let foo = ecx.memory_mut().allocate_static_bytes(b"foo\0").with_default_tag(); let foo_ty = ecx.tcx.mk_imm_ptr(ecx.tcx.types.u8); let foo_layout = ecx.layout_of(foo_ty)?; let foo_place = ecx.allocate(foo_layout, MiriMemoryKind::Env.into())?; ecx.write_scalar(Scalar::Ptr(foo), foo_place.into())?; - ecx.memory.mark_immutable(foo_place.to_ptr()?.alloc_id)?; + ecx.memory_mut().mark_immutable(foo_place.to_ptr()?.alloc_id)?; ecx.write_scalar(foo_place.ptr, dest)?; assert!(args.next().is_none(), "start lang item has more arguments than expected"); @@ -404,7 +408,7 @@ impl<'a, 'mir, 'tcx> Machine<'a, 'mir, 'tcx> for Evaluator<'tcx> { Ok(()) } - fn static_with_default_tag( + fn adjust_static_allocation( alloc: &'_ Allocation ) -> Cow<'_, Allocation> { let alloc: Allocation = Allocation { @@ -434,7 +438,7 @@ impl<'a, 'mir, 'tcx> Machine<'a, 'mir, 'tcx> for Evaluator<'tcx> { #[inline(always)] fn memory_deallocated( - alloc: &mut Allocation, + alloc: &mut Allocation, ptr: Pointer, ) -> EvalResult<'tcx> { alloc.extra.memory_deallocated(ptr) @@ -443,30 +447,53 @@ impl<'a, 'mir, 'tcx> Machine<'a, 'mir, 'tcx> for Evaluator<'tcx> { #[inline(always)] fn tag_reference( ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, - ptr: Pointer, - pointee_ty: Ty<'tcx>, - pointee_size: Size, - borrow_kind: Option<mir::BorrowKind>, - ) -> EvalResult<'tcx, Borrow> { - if !ecx.machine.validate { + place: MemPlace, + ty: Ty<'tcx>, + size: Size, + mutability: Option<hir::Mutability>, + ) -> EvalResult<'tcx, MemPlace> { + if !ecx.machine.validate || size == Size::ZERO { // No tracking - Ok(Borrow::default()) + Ok(place) } else { - ecx.tag_reference(ptr, pointee_ty, pointee_size, borrow_kind) + let ptr = place.ptr.to_ptr()?; + let tag = ecx.tag_reference(ptr, ty, size,
mutability.into())?; + let ptr = Scalar::Ptr(Pointer::new_with_tag(ptr.alloc_id, ptr.offset, tag)); + Ok(MemPlace { ptr, ..place }) } } #[inline(always)] fn tag_dereference( ecx: &EvalContext<'a, 'mir, 'tcx, Self>, - ptr: Pointer, - ptr_ty: Ty<'tcx>, - ) -> EvalResult<'tcx, Borrow> { + place: MemPlace, + ty: Ty<'tcx>, + size: Size, + mutability: Option<hir::Mutability>, + ) -> EvalResult<'tcx, MemPlace> { + if !ecx.machine.validate || size == Size::ZERO { + // No tracking + Ok(place) + } else { + let ptr = place.ptr.to_ptr()?; + let tag = ecx.tag_dereference(ptr, ty, size, mutability.into())?; + let ptr = Scalar::Ptr(Pointer::new_with_tag(ptr.alloc_id, ptr.offset, tag)); + Ok(MemPlace { ptr, ..place }) + } + } + + #[inline(always)] + fn tag_new_allocation( + ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, + ptr: Pointer, + kind: MemoryKind<Self::MemoryKinds>, + ) -> EvalResult<'tcx, Pointer> { if !ecx.machine.validate { // No tracking - Ok(Borrow::default()) + Ok(ptr.with_default_tag()) } else { - ecx.tag_dereference(ptr, ptr_ty) + let tag = ecx.tag_new_allocation(ptr.alloc_id, kind); + Ok(Pointer::new_with_tag(ptr.alloc_id, ptr.offset, tag)) } } } diff --git a/src/operator.rs b/src/operator.rs index a11f8f34e7..0eac5c1127 100644 --- a/src/operator.rs +++ b/src/operator.rs @@ -80,8 +80,8 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, ' Ge => left.offset >= right.offset, Sub => { // subtract the offsets - let left_offset = Scalar::from_uint(left.offset.bytes(), self.memory.pointer_size()); - let right_offset = Scalar::from_uint(right.offset.bytes(), self.memory.pointer_size()); + let left_offset = Scalar::from_uint(left.offset.bytes(), self.memory().pointer_size()); + let right_offset = Scalar::from_uint(right.offset.bytes(), self.memory().pointer_size()); let layout = self.layout_of(self.tcx.types.usize)?; return self.binary_op( Sub, @@ -103,7 +103,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, ' self.ptr_int_arithmetic( bin_op, left.to_ptr().expect("we checked is_ptr"), - right.to_bits(self.memory.pointer_size()).expect("we checked is_bits"), + right.to_bits(self.memory().pointer_size()).expect("we checked is_bits"), right_layout.abi.is_signed(), ) } @@ -113,7 +113,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, ' self.ptr_int_arithmetic( bin_op, right.to_ptr().expect("we checked is_ptr"), - left.to_bits(self.memory.pointer_size()).expect("we checked is_bits"), + left.to_bits(self.memory().pointer_size()).expect("we checked is_bits"), left_layout.abi.is_signed(), ) } @@ -142,8 +142,8 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, ' // allocations sit right next to each other. The C/C++ standards are // somewhat fuzzy about this case, so I think for now this check is // "good enough".
- self.memory.check_bounds_ptr(left, false)?; - self.memory.check_bounds_ptr(right, false)?; + self.memory().check_bounds_ptr(left, false)?; + self.memory().check_bounds_ptr(right, false)?; // Two live in-bounds pointers, we can compare across allocations left == right } @@ -153,7 +153,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, ' (Scalar::Bits { bits, size }, Scalar::Ptr(ptr)) => { assert_eq!(size as u64, self.pointer_size().bytes()); let bits = bits as u64; - let (alloc_size, alloc_align) = self.memory.get_size_and_align(ptr.alloc_id); + let (alloc_size, alloc_align) = self.memory().get_size_and_align(ptr.alloc_id); // Case I: Comparing with NULL if bits == 0 { @@ -223,15 +223,15 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, ' map_to_primval(left.overflowing_offset(Size::from_bytes(right as u64), self)), BitAnd if !signed => { - let ptr_base_align = self.memory.get(left.alloc_id)?.align.abi(); + let ptr_base_align = self.memory().get(left.alloc_id)?.align.abi(); let base_mask = { // FIXME: Use interpret::truncate, once that takes a Size instead of a Layout - let shift = 128 - self.memory.pointer_size().bits(); + let shift = 128 - self.memory().pointer_size().bits(); let value = !(ptr_base_align as u128 - 1); // truncate (shift left to drop out leftover values, shift right to fill with zeroes) (value << shift) >> shift }; - let ptr_size = self.memory.pointer_size().bytes() as u8; + let ptr_size = self.memory().pointer_size().bytes() as u8; trace!("Ptr BitAnd, align {}, operand {:#010x}, base_mask {:#010x}", ptr_base_align, right, base_mask); if right & base_mask == base_mask { @@ -256,9 +256,9 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, ' Rem if !signed => { // Doing modulo a divisor of the alignment is allowed. // (Intuition: Modulo a divisor leaks less information.) - let ptr_base_align = self.memory.get(left.alloc_id)?.align.abi(); + let ptr_base_align = self.memory().get(left.alloc_id)?.align.abi(); let right = right as u64; - let ptr_size = self.memory.pointer_size().bytes() as u8; + let ptr_size = self.memory().pointer_size().bytes() as u8; if right == 1 { // modulo 1 is always 0 (Scalar::Bits { bits: 0, size: ptr_size }, false) @@ -295,9 +295,9 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, ' if let Scalar::Ptr(ptr) = ptr { // Both old and new pointer must be in-bounds. // (Of the same allocation, but that part is trivial with our representation.) - self.memory.check_bounds_ptr(ptr, false)?; + self.memory().check_bounds_ptr(ptr, false)?; let ptr = ptr.signed_offset(offset, self)?; - self.memory.check_bounds_ptr(ptr, false)?; + self.memory().check_bounds_ptr(ptr, false)?; Ok(Scalar::Ptr(ptr)) } else { // An integer pointer. 
They can only be offset by 0, and we pretend there diff --git a/src/stacked_borrows.rs b/src/stacked_borrows.rs index 077deceecf..a569ed4e55 100644 --- a/src/stacked_borrows.rs +++ b/src/stacked_borrows.rs @@ -1,10 +1,10 @@ -use std::cell::RefCell; +use std::cell::{Cell, RefCell}; use rustc::ty::{Ty, layout::Size}; -use rustc::mir; +use rustc::hir; use super::{ - MemoryAccess, RangeMap, EvalResult, + MemoryAccess, MemoryKind, MiriMemoryKind, RangeMap, EvalResult, AllocId, Pointer, }; @@ -21,12 +21,20 @@ pub enum Mut { impl Mut { #[inline(always)] - fn is_raw(self) -> bool { + pub fn is_raw(self) -> bool { match self { Mut::Raw => true, _ => false, } } + + #[inline(always)] + pub fn is_uniq(self) -> bool { + match self { + Mut::Uniq(_) => true, + _ => false, + } + } } /// Information about any kind of borrow @@ -40,9 +48,17 @@ pub enum Borrow { impl Borrow { #[inline(always)] - fn is_uniq(self) -> bool { + pub fn is_uniq(self) -> bool { + match self { + Borrow::Mut(m) => m.is_uniq(), + _ => false, + } + } + + #[inline(always)] + pub fn is_frz(self) -> bool { match self { - Borrow::Mut(Mut::Uniq(_)) => true, + Borrow::Frz(_) => true, _ => false, } } @@ -64,15 +80,33 @@ impl Default for Borrow { } } +/// What kind of reference are we talking about? +#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] +pub enum RefKind { + Mut, + Shr, + Raw, +} + +impl From<Option<hir::Mutability>> for RefKind { + fn from(mutbl: Option<hir::Mutability>) -> Self { + match mutbl { + None => RefKind::Raw, + Some(hir::MutMutable) => RefKind::Mut, + Some(hir::MutImmutable) => RefKind::Shr, + } + } +} + /// Extra global machine state #[derive(Clone, Debug)] pub struct State { - clock: Timestamp + clock: Cell<Timestamp> } impl State { pub fn new() -> State { - State { clock: 0 } + State { clock: Cell::new(0) } } } @@ -86,7 +120,7 @@ struct Stack { impl Default for Stack { fn default() -> Self { Stack { - borrows: Vec::new(), + borrows: vec![BorStackItem::Mut(Mut::Raw)], frozen_since: None, } } } @@ -100,6 +134,9 @@ pub struct Stacks { /// Core operations impl<'tcx> Stack { + /// Check if `bor` is currently active. We accept a `Raw` on a frozen location + /// because this could be a shared (re)borrow. If you want to mutate, this + /// is not the right function to call! fn check(&self, bor: Borrow) -> bool { match bor { Borrow::Frz(acc_t) => @@ -115,62 +152,94 @@ impl<'tcx> Stack { } } - /// Reactive `bor` for this stack. If `force_mut` is set, we want to aggressively - /// unfreeze this location (because we are about to push a `Uniq`). - fn reactivate(&mut self, bor: Borrow, force_mut: bool) -> EvalResult<'tcx> { + /// Check if `bor` could be activated by unfreezing and popping. + /// `force_mut` indicates whether being frozen is potentially acceptable. + /// Returns `Err` if the answer is "no"; otherwise the data says + /// what needs to happen to activate this: `None` = nothing, + /// `Some(n)` = unfreeze and make item `n` the top item of the stack. + fn reactivatable(&self, bor: Borrow, force_mut: bool) -> Result<Option<usize>, String> { // Unless mutation is bound to happen, do NOT change anything if `bor` is already active. // In particular, if it is a `Mut(Raw)` and we are frozen, this should be a NOP.
if !force_mut && self.check(bor) { - return Ok(()); + return Ok(None); } let acc_m = match bor { - Borrow::Frz(_) => - if force_mut { - return err!(MachineError(format!("Using a shared borrow for mutation"))) + Borrow::Frz(since) => + return Err(if force_mut { + format!("Using a shared borrow for mutation") } else { - return err!(MachineError(format!("Location should be frozen but it is not"))) - } - Borrow::Mut(acc_m) => acc_m, + format!( + "Location should be frozen since {} but {}", + since, + match self.frozen_since { + None => format!("it is not frozen at all"), + Some(since) => format!("it is only frozen since {}", since), + } + ) + }), + Borrow::Mut(acc_m) => acc_m }; - // We definitely have to unfreeze this, even if we use the topmost item. - self.frozen_since = None; - // Pop until we see the one we are looking for. - while let Some(&itm) = self.borrows.last() { + // This is where we would unfreeze. + for (idx, &itm) in self.borrows.iter().enumerate().rev() { match itm { - BorStackItem::FnBarrier(_) => { - return err!(MachineError(format!("Trying to reactivate a borrow that lives behind a barrier"))); - } + BorStackItem::FnBarrier(_) => + return Err(format!("Trying to reactivate a mutable borrow ({:?}) that lives behind a barrier", acc_m)), BorStackItem::Mut(loc_m) => { - if loc_m == acc_m { return Ok(()); } - trace!("reactivate: Popping {:?}", itm); - self.borrows.pop(); + if loc_m == acc_m { return Ok(Some(idx)); } } } } - // Nothing to be found. Simulate a "virtual raw" element at the bottom of the stack. - if acc_m.is_raw() { - Ok(()) - } else { - err!(MachineError(format!("Borrow-to-reactivate does not exist on the stack"))) + // Nothing to be found. + Err(format!("Mutable borrow-to-reactivate ({:?}) does not exist on the stack", acc_m)) + } + + /// Reactivate `bor` for this stack. If `force_mut` is set, we want to aggressively + /// unfreeze this location (because we are about to mutate, so a frozen `Raw` is not okay). + fn reactivate(&mut self, bor: Borrow, force_mut: bool) -> EvalResult<'tcx> { + let action = match self.reactivatable(bor, force_mut) { + Ok(action) => action, + Err(err) => return err!(MachineError(err)), + }; + + match action { + None => {}, // nothing to do + Some(top) => { + self.frozen_since = None; + self.borrows.truncate(top+1); + } } + + Ok(()) } + /// Initiate `bor`; mostly this means freezing or pushing. fn initiate(&mut self, bor: Borrow) -> EvalResult<'tcx> { match bor { Borrow::Frz(t) => { - trace!("initiate: Freezing"); match self.frozen_since { - None => self.frozen_since = Some(t), - Some(since) => assert!(since <= t), + None => { + trace!("initiate: Freezing"); + self.frozen_since = Some(t); + } + Some(since) => { + trace!("initiate: Already frozen"); + assert!(since <= t); + } } } Borrow::Mut(m) => { - trace!("initiate: Pushing {:?}", bor); match self.frozen_since { - None => self.borrows.push(BorStackItem::Mut(m)), + None => { + trace!("initiate: Pushing {:?}", bor); + self.borrows.push(BorStackItem::Mut(m)) + } + Some(_) if m.is_raw() => + // We only ever initiate right after activating the ref we come from. + // If the source ref is fine being frozen, then a raw ref we create + // from it is fine with this as well. + trace!("initiate: Initiating a raw on a frozen location, not doing a thing"), Some(_) => - // FIXME: Do we want an exception for raw borrows?
return err!(MachineError(format!("Trying to mutate frozen location"))) } } @@ -180,9 +249,10 @@ impl<'tcx> Stack { } impl State { - fn increment_clock(&mut self) -> Timestamp { - self.clock += 1; - self.clock + fn increment_clock(&self) -> Timestamp { + let val = self.clock.get(); + self.clock.set(val + 1); + val } } @@ -221,14 +291,26 @@ impl<'tcx> Stacks { ptr: Pointer, size: Size, new_bor: Borrow, + permit_redundant: bool, ) -> EvalResult<'tcx> { let mut stacks = self.stacks.borrow_mut(); for stack in stacks.iter_mut(ptr.offset, size) { - if stack.check(new_bor) { + if permit_redundant && stack.check(new_bor) { // The new borrow is already active! This can happen when creating multiple // shared references from the same mutable reference. Do nothing. + trace!("reborrow: New borrow {:?} is already active, not doing a thing", new_bor); } else { - // FIXME: The blog post says we should `reset` if this is a local. + // If we are creating a uniq ref, we certainly want to unfreeze. + // Even if we are doing so from a raw. + // Notice that if this is a local, whenever we access it directly the + // tag here will be the bottommost `Uniq` for that local. That `Uniq` + // never is accessible by the program, so it will not be used by any + // other access. IOW, whenever we directly use a local this will pop + // everything else off the stack, invalidating all previous pointers + // and, in particular, *all* raw pointers. This subsumes the explicit + // `reset` which the blog post [1] says to perform when accessing a local. + // + // [1] https://www.ralfj.de/blog/2018/08/07/stacked-borrows.html stack.reactivate(ptr.tag, /*force_mut*/new_bor.is_uniq())?; stack.initiate(new_bor)?; } @@ -236,73 +318,192 @@ impl<'tcx> Stacks { Ok(()) } + + /// Pushes the first borrow to the stacks, must be a mutable one. + pub fn first_borrow( + &mut self, + mut_borrow: Mut, + size: Size + ) { + for stack in self.stacks.get_mut().iter_mut(Size::ZERO, size) { + assert!(stack.borrows.len() == 1 && stack.frozen_since.is_none()); + assert_eq!(stack.borrows.pop().unwrap(), BorStackItem::Mut(Mut::Raw)); + stack.borrows.push(BorStackItem::Mut(mut_borrow)); + } + } } -/// Machine hooks pub trait EvalContextExt<'tcx> { + fn tag_for_pointee( + &self, + pointee_ty: Ty<'tcx>, + ref_kind: RefKind, + ) -> Borrow; + fn tag_reference( - &mut self, + &self, ptr: Pointer, pointee_ty: Ty<'tcx>, size: Size, - borrow_kind: Option, + ref_kind: RefKind, ) -> EvalResult<'tcx, Borrow>; + fn tag_dereference( &self, ptr: Pointer, - ptr_ty: Ty<'tcx>, + pointee_ty: Ty<'tcx>, + size: Size, + ref_kind: RefKind, ) -> EvalResult<'tcx, Borrow>; + + fn tag_new_allocation( + &mut self, + id: AllocId, + kind: MemoryKind, + ) -> Borrow; } impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, 'tcx> { - fn tag_reference( - &mut self, - ptr: Pointer, + fn tag_for_pointee( + &self, pointee_ty: Ty<'tcx>, - size: Size, - borrow_kind: Option, - ) -> EvalResult<'tcx, Borrow> { + ref_kind: RefKind, + ) -> Borrow { let time = self.machine.stacked_borrows.increment_clock(); - let new_bor = match borrow_kind { - Some(mir::BorrowKind::Mut { .. }) => Borrow::Mut(Mut::Uniq(time)), - Some(_) => + match ref_kind { + RefKind::Mut => Borrow::Mut(Mut::Uniq(time)), + RefKind::Shr => // FIXME This does not do enough checking when only part of the data has // interior mutability. When the type is `(i32, Cell)`, we want the // first field to be frozen but not the second. 
if self.type_is_freeze(pointee_ty) { Borrow::Frz(time) } else { + // Shared reference with interior mutability. Borrow::Mut(Mut::Raw) }, - None => Borrow::Mut(Mut::Raw), - }; + RefKind::Raw => Borrow::Mut(Mut::Raw), + } + } + + /// Called for place-to-value conversion. + fn tag_reference( + &self, + ptr: Pointer, + pointee_ty: Ty<'tcx>, + size: Size, + ref_kind: RefKind, + ) -> EvalResult<'tcx, Borrow> { + let new_bor = self.tag_for_pointee(pointee_ty, ref_kind); trace!("tag_reference: Creating new reference ({:?}) for {:?} (pointee {}, size {}): {:?}", - borrow_kind, ptr, pointee_ty, size.bytes(), new_bor); + ref_kind, ptr, pointee_ty, size.bytes(), new_bor); // Make sure this reference is not dangling or so - self.memory.check_bounds(ptr, size, false)?; + self.memory().check_bounds(ptr, size, false)?; // Update the stacks. We cannot use `get_mut` because this might be immutable // memory. - let alloc = self.memory.get(ptr.alloc_id).expect("We checked that the ptr is fine!"); - alloc.extra.reborrow(ptr, size, new_bor)?; + let alloc = self.memory().get(ptr.alloc_id).expect("We checked that the ptr is fine!"); + let permit_redundant = ref_kind == RefKind::Shr; // redundant shared refs are okay + alloc.extra.reborrow(ptr, size, new_bor, permit_redundant)?; Ok(new_bor) } + /// Called for value-to-place conversion. + /// + /// Note that this does NOT mean that all this memory will actually get accessed/referenced! + /// We could be in the middle of `&(*var).1`. fn tag_dereference( &self, ptr: Pointer, - ptr_ty: Ty<'tcx>, + pointee_ty: Ty<'tcx>, + size: Size, + ref_kind: RefKind, ) -> EvalResult<'tcx, Borrow> { - // If this is a raw ptr, forget about the tag. - Ok(if ptr_ty.is_unsafe_ptr() { - trace!("tag_dereference: Erasing tag for {:?} ({})", ptr, ptr_ty); - Borrow::Mut(Mut::Raw) - } else { - // FIXME: Do we want to adjust the tag if it does not match the type? - ptr.tag - }) + // In principle we should not have to do anything here. However, with transmutes involved, + // it can happen that the tag of `ptr` does not actually match `ref_kind`, and we + // should adjust for that. + // Notably, the compiler can introduce such transmutes by optimizing away `&[mut]*`. + // That can transmute a raw ptr to a (shared/mut) ref, and a mut ref to a shared one. + match (ref_kind, ptr.tag) { + (RefKind::Raw, _) => { + // Don't use the tag, this is a raw access! Even if there is a tag, + // that means transmute happened and we ignore the tag. + // Also don't do any further validation, this is raw after all. + return Ok(Borrow::Mut(Mut::Raw)); + } + (RefKind::Mut, Borrow::Mut(Mut::Uniq(_))) | + (RefKind::Shr, Borrow::Frz(_)) | + (RefKind::Shr, Borrow::Mut(Mut::Raw)) => { + // Expected combinations. Nothing to do. + // FIXME: We probably shouldn't accept this if we got a raw shr without + // interior mutability. + } + (RefKind::Mut, Borrow::Mut(Mut::Raw)) => { + // Raw transmuted to mut ref. Keep this as raw access. + // We cannot reborrow here; there might be a raw in `&(*var).1` where + // `var` is an `&mut`. The other field of the struct might be already frozen, + // also using `var`, and that would be okay. + } + (RefKind::Shr, Borrow::Mut(Mut::Uniq(_))) => { + // A mut got transmuted to shr. High time we freeze this location! + // Make this a delayed reborrow. Redundant reborrows to shr are okay, + // so we do not have to be worried about doing too much. + // FIXME: Reconsider if we really want to mutate things while doing just a deref, + // which, in particular, validation does.
+ trace!("tag_dereference: Lazy freezing of {:?}", ptr); + return self.tag_reference(ptr, pointee_ty, size, ref_kind); + } + (RefKind::Mut, Borrow::Frz(_)) => { + // This is just invalid. + // If we ever allow this, we have to consider what we do when a turn a + // `Raw`-tagged `&mut` into a raw pointer pointing to a frozen location. + // We probably do not want to allow that, but we have to allow + // turning a `Raw`-tagged `&` into a raw ptr to a frozen location. + return err!(MachineError(format!("Encountered mutable reference with frozen tag {:?}", ptr.tag))) + } + } + // Even if we don't touch the tag, this operation is only okay if we *could* + // activate it. Also it must not be dangling. + self.memory().check_bounds(ptr, size, false)?; + let alloc = self.memory().get(ptr.alloc_id).expect("We checked that the ptr is fine!"); + let mut stacks = alloc.extra.stacks.borrow_mut(); + // We need `iter_mut` because `iter` would skip gaps! + for stack in stacks.iter_mut(ptr.offset, size) { + // We accept &mut to a frozen location here, that is just normal. There might + // be shared reborrows that we are about to invalidate with this access. + // We cannot invalidate them aggressively here because the deref might also be + // to just create more shared refs. + if let Err(err) = stack.reactivatable(ptr.tag, /*force_mut*/false) { + return err!(MachineError(format!("Encountered {:?} reference with non-reactivatable tag: {}", ref_kind, err))) + } + } + // All is good. + Ok(ptr.tag) + } + + fn tag_new_allocation( + &mut self, + id: AllocId, + kind: MemoryKind, + ) -> Borrow { + let mut_borrow = match kind { + MemoryKind::Stack => { + // New unique borrow + let time = self.machine.stacked_borrows.increment_clock(); + Mut::Uniq(time) + } + _ => { + // Raw for everything else + Mut::Raw + } + }; + // Make this the active borrow for this allocation + let alloc = self.memory_mut().get_mut(id).expect("This is a new allocation, it must still exist"); + let size = Size::from_bytes(alloc.bytes.len() as u64); + alloc.extra.first_borrow(mut_borrow, size); + Borrow::Mut(mut_borrow) } } diff --git a/tests/compile-fail-fullmir/stack_free.rs b/tests/compile-fail-fullmir/stack_free.rs index 96006c884e..6d853e75fb 100644 --- a/tests/compile-fail-fullmir/stack_free.rs +++ b/tests/compile-fail-fullmir/stack_free.rs @@ -1,3 +1,6 @@ +// Validation changes why we fail +// compile-flags: -Zmir-emit-validate=0 -Zmiri-disable-validation + // error-pattern: tried to deallocate Stack memory but gave Machine(Rust) as the kind fn main() { diff --git a/tests/compile-fail/copy_nonoverlapping.rs b/tests/compile-fail/copy_nonoverlapping.rs index 18fbc61b6d..8e8912c81f 100644 --- a/tests/compile-fail/copy_nonoverlapping.rs +++ b/tests/compile-fail/copy_nonoverlapping.rs @@ -15,8 +15,8 @@ fn main() { let mut data = [0u8; 16]; unsafe { - let a = &data[0] as *const _; - let b = &mut data[1] as *mut _; + let a = data.as_mut_ptr(); + let b = a.wrapping_offset(1) as *mut _; std::ptr::copy_nonoverlapping(a, b, 2); } } diff --git a/tests/compile-fail/stacked_borrows/alias_through_mutation.rs b/tests/compile-fail/stacked_borrows/alias_through_mutation.rs index 9fa50da45b..7d56f30b3e 100644 --- a/tests/compile-fail/stacked_borrows/alias_through_mutation.rs +++ b/tests/compile-fail/stacked_borrows/alias_through_mutation.rs @@ -1,3 +1,6 @@ +// With optimizations, we just store a raw in `x`, and there is no problem. 
+// compile-flags: -Zmir-opt-level=0 + #![allow(unused_variables)] // This makes a ref that was passed to us via &mut alias with things it should not alias with @@ -11,5 +14,5 @@ fn main() { retarget(&mut target_alias, target); // now `target_alias` points to the same thing as `target` *target = 13; - let _val = *target_alias; //~ ERROR should be frozen + let _val = *target_alias; //~ ERROR Shr reference with non-reactivatable tag } diff --git a/tests/compile-fail/stacked_borrows/buggy_as_mut_slice.rs b/tests/compile-fail/stacked_borrows/buggy_as_mut_slice.rs index 4857ada7fb..dc1e3cc81e 100644 --- a/tests/compile-fail/stacked_borrows/buggy_as_mut_slice.rs +++ b/tests/compile-fail/stacked_borrows/buggy_as_mut_slice.rs @@ -1,3 +1,6 @@ +// FIXME: Without retagging, optimization kills finding this problem +// compile-flags: -Zmir-opt-level=0 + #![allow(unused_variables)] mod safe { @@ -14,6 +17,6 @@ fn main() { let v = vec![0,1,2]; let v1 = safe::as_mut_slice(&v); let v2 = safe::as_mut_slice(&v); - v1[1] = 5; //~ ERROR does not exist on the stack + v1[1] = 5; //~ ERROR Mut reference with non-reactivatable tag v1[1] = 6; } diff --git a/tests/compile-fail/stacked_borrows/buggy_split_at_mut.rs b/tests/compile-fail/stacked_borrows/buggy_split_at_mut.rs index d7f4300f82..a697ba9167 100644 --- a/tests/compile-fail/stacked_borrows/buggy_split_at_mut.rs +++ b/tests/compile-fail/stacked_borrows/buggy_split_at_mut.rs @@ -1,3 +1,6 @@ +// FIXME: Without retagging, optimization kills finding this problem +// compile-flags: -Zmir-opt-level=0 + #![allow(unused_variables)] mod safe { @@ -11,6 +14,7 @@ mod safe { assert!(mid <= len); (from_raw_parts_mut(ptr, len - mid), // BUG: should be "mid" instead of "len - mid" + //~^ ERROR Mut reference with non-reactivatable tag from_raw_parts_mut(ptr.offset(mid as isize), len - mid)) } } @@ -19,6 +23,6 @@ fn main() { let mut array = [1,2,3,4]; let (a, b) = safe::split_at_mut(&mut array, 0); - a[1] = 5; //~ ERROR does not exist on the stack + a[1] = 5; b[1] = 6; } diff --git a/tests/compile-fail/stacked_borrows/illegal_write.rs b/tests/compile-fail/stacked_borrows/illegal_write.rs deleted file mode 100644 index 6a7ccc8401..0000000000 --- a/tests/compile-fail/stacked_borrows/illegal_write.rs +++ /dev/null @@ -1,11 +0,0 @@ -fn evil(x: &u32) { - let x : &mut u32 = unsafe { &mut *(x as *const _ as *mut _) }; - *x = 42; // mutating shared ref without `UnsafeCell` -} - -fn main() { - let target = 42; - let ref_ = &target; - evil(ref_); // invalidates shared ref - let _x = *ref_; //~ ERROR should be frozen -} diff --git a/tests/compile-fail/stacked_borrows/illegal_write1.rs b/tests/compile-fail/stacked_borrows/illegal_write1.rs new file mode 100644 index 0000000000..131e89572a --- /dev/null +++ b/tests/compile-fail/stacked_borrows/illegal_write1.rs @@ -0,0 +1,12 @@ +fn evil(x: &u32) { + // mutating shared ref without `UnsafeCell` + let x : *mut u32 = x as *const _ as *mut _; + unsafe { *x = 42; } +} + +fn main() { + let target = Box::new(42); // has an implicit raw + let ref_ = &*target; + evil(ref_); // invalidates shared ref, activates raw + let _x = *ref_; //~ ERROR Shr reference with non-reactivatable tag +} diff --git a/tests/compile-fail/stacked_borrows/illegal_write2.rs b/tests/compile-fail/stacked_borrows/illegal_write2.rs index 1d61b1b988..ac9c3397f5 100644 --- a/tests/compile-fail/stacked_borrows/illegal_write2.rs +++ b/tests/compile-fail/stacked_borrows/illegal_write2.rs @@ -1,10 +1,13 @@ +// The reborrow gets optimized away, so we can only detect this issue
without optimizations +// compile-flags: -Zmir-opt-level=0 + #![allow(unused_variables)] fn main() { let target = &mut 42; let target2 = target as *mut _; drop(&mut *target); // reborrow - // Now make sure our ref is still the only one - unsafe { *target2 = 13; } // invalidate our ref - let _val = *target; //~ ERROR does not exist on the stack + // Now make sure our ref is still the only one. + unsafe { *target2 = 13; } //~ ERROR does not exist on the stack + let _val = *target; } diff --git a/tests/compile-fail/stacked_borrows/illegal_write3.rs b/tests/compile-fail/stacked_borrows/illegal_write3.rs new file mode 100644 index 0000000000..a653aa5003 --- /dev/null +++ b/tests/compile-fail/stacked_borrows/illegal_write3.rs @@ -0,0 +1,8 @@ +fn main() { + let target = 42; + // Make sure raw ptr with raw tag cannot mutate frozen location without breaking the shared ref. + let r#ref = &target; // freeze + let ptr = r#ref as *const _ as *mut _; // raw ptr, with raw tag + unsafe { *ptr = 42; } //~ ERROR does not exist on the stack + let _val = *r#ref; +} diff --git a/tests/compile-fail/stacked_borrows/illegal_write4.rs b/tests/compile-fail/stacked_borrows/illegal_write4.rs new file mode 100644 index 0000000000..094a389519 --- /dev/null +++ b/tests/compile-fail/stacked_borrows/illegal_write4.rs @@ -0,0 +1,31 @@ +// The compiler inserts some reborrows, enable optimizations to +// get rid of them. +// compile-flags: -Zmir-opt-level=1 + +use std::mem; + +// This is an example of a piece of code that intuitively seems like we might +// want to reject it, but that doesn't turn out to be possible. + +fn main() { + let target = 42; + // Make sure we cannot use a raw-tagged `&mut` pointing to a frozen location, not + // even to create a raw. + let reference = &target; // freeze + let ptr = reference as *const _ as *mut i32; // raw ptr, with raw tag + let mut_ref: &mut i32 = unsafe { mem::transmute(ptr) }; // &mut, with raw tag + // Now we have an &mut to a frozen location, but that is completely normal: + // We'd just unfreeze the location if we used it. + let bad_ptr = mut_ref as *mut i32; // even just creating this is like a use of `mut_ref`. + // That violates the location being frozen! However, we do not properly detect this: + // We first see a `&mut` with a `Raw` tag being deref'd for a frozen location, + // which can happen legitimately if the compiler optimized away an `&mut*` that + // turns a raw into a `&mut`. Next, we create a raw ref to a frozen location + // from a `Raw` tag, which can happen legitimately when interior mutability + // is involved. + let _val = *reference; // Make sure it is still frozen. + + // We only actually unfreeze once we mutate through the bad pointer. + unsafe { *bad_ptr = 42 }; //~ ERROR does not exist on the stack + let _val = *reference; +} diff --git a/tests/compile-fail/stacked_borrows/load_invalid_mut.rs b/tests/compile-fail/stacked_borrows/load_invalid_mut.rs new file mode 100644 index 0000000000..060cec962c --- /dev/null +++ b/tests/compile-fail/stacked_borrows/load_invalid_mut.rs @@ -0,0 +1,9 @@ +// Make sure that we cannot load from memory a `&mut` that got already invalidated.
+fn main() { + let x = &mut 42; + let xraw = x as *mut _; + let xref = unsafe { &mut *xraw }; + let xref_in_mem = Box::new(xref); + let _val = *x; // invalidate xraw + let _val = *xref_in_mem; //~ ERROR Mut reference with non-reactivatable tag +} diff --git a/tests/compile-fail/stacked_borrows/load_invalid_shr.rs b/tests/compile-fail/stacked_borrows/load_invalid_shr.rs new file mode 100644 index 0000000000..785a15c470 --- /dev/null +++ b/tests/compile-fail/stacked_borrows/load_invalid_shr.rs @@ -0,0 +1,9 @@ +// Make sure that we cannot load from memory a `&` that got already invalidated. +fn main() { + let x = &mut 42; + let xraw = x as *mut _; + let xref = unsafe { &*xraw }; + let xref_in_mem = Box::new(xref); + let _val = *x; // invalidate xraw + let _val = *xref_in_mem; //~ ERROR Shr reference with non-reactivatable tag: Location should be frozen +} diff --git a/tests/compile-fail/stacked_borrows/outdated_local.rs b/tests/compile-fail/stacked_borrows/outdated_local.rs new file mode 100644 index 0000000000..64a8ff6910 --- /dev/null +++ b/tests/compile-fail/stacked_borrows/outdated_local.rs @@ -0,0 +1,9 @@ +fn main() { + let mut x = 0; + let y: *const i32 = &x; + x = 1; // this invalidates y by reactivating the lowermost uniq borrow for this local + + assert_eq!(unsafe { *y }, 1); //~ ERROR does not exist on the stack + + assert_eq!(x, 1); +} diff --git a/tests/compile-fail/stacked_borrows/pass_invalid_mut.rs b/tests/compile-fail/stacked_borrows/pass_invalid_mut.rs new file mode 100644 index 0000000000..bc950771ad --- /dev/null +++ b/tests/compile-fail/stacked_borrows/pass_invalid_mut.rs @@ -0,0 +1,10 @@ +// Make sure that we cannot pass by argument a `&mut` that got already invalidated. +fn foo(_: &mut i32) {} + +fn main() { + let x = &mut 42; + let xraw = x as *mut _; + let xref = unsafe { &mut *xraw }; + let _val = *x; // invalidate xraw + foo(xref); //~ ERROR Mut reference with non-reactivatable tag +} diff --git a/tests/compile-fail/stacked_borrows/pass_invalid_shr.rs b/tests/compile-fail/stacked_borrows/pass_invalid_shr.rs new file mode 100644 index 0000000000..8b7a846d84 --- /dev/null +++ b/tests/compile-fail/stacked_borrows/pass_invalid_shr.rs @@ -0,0 +1,10 @@ +// Make sure that we cannot pass by argument a `&` that got already invalidated. +fn foo(_: &i32) {} + +fn main() { + let x = &mut 42; + let xraw = &*x as *const _; + let xref = unsafe { &*xraw }; + let _val = *x; // invalidate xraw + foo(xref); //~ ERROR Shr reference with non-reactivatable tag: Location should be frozen +} diff --git a/tests/compile-fail/stacked_borrows/pointer_smuggling.rs b/tests/compile-fail/stacked_borrows/pointer_smuggling.rs index 3576aa52b7..68f3d2923b 100644 --- a/tests/compile-fail/stacked_borrows/pointer_smuggling.rs +++ b/tests/compile-fail/stacked_borrows/pointer_smuggling.rs @@ -10,7 +10,7 @@ fn fun1(x: &mut u8) { fn fun2() { // Now we use a pointer we are not allowed to use - let _x = unsafe { *PTR }; + let _x = unsafe { *PTR }; //~ ERROR does not exist on the stack } fn main() { @@ -18,5 +18,4 @@ fn main() { fun1(val); *val = 2; // this invalidates any raw ptrs `fun1` might have created. 
fun2(); // if they now use a raw ptr they break our reference - *val = 3; //~ ERROR does not exist on the stack } diff --git a/tests/compile-fail/stacked_borrows/return_invalid_mut.rs b/tests/compile-fail/stacked_borrows/return_invalid_mut.rs new file mode 100644 index 0000000000..c02892671e --- /dev/null +++ b/tests/compile-fail/stacked_borrows/return_invalid_mut.rs @@ -0,0 +1,11 @@ +// Make sure that we cannot return a `&mut` that got already invalidated. +fn foo(x: &mut (i32, i32)) -> &mut i32 { + let xraw = x as *mut (i32, i32); + let ret = unsafe { &mut (*xraw).1 }; + let _val = *x; // invalidate xraw and its children + ret //~ ERROR Mut reference with non-reactivatable tag +} + +fn main() { + foo(&mut (1, 2)); +} diff --git a/tests/compile-fail/stacked_borrows/return_invalid_shr.rs b/tests/compile-fail/stacked_borrows/return_invalid_shr.rs new file mode 100644 index 0000000000..89c94127b0 --- /dev/null +++ b/tests/compile-fail/stacked_borrows/return_invalid_shr.rs @@ -0,0 +1,11 @@ +// Make sure that we cannot return a `&` that got already invalidated. +fn foo(x: &mut (i32, i32)) -> &i32 { + let xraw = x as *mut (i32, i32); + let ret = unsafe { &(*xraw).1 }; + let _val = *x; // invalidate xraw and its children + ret //~ ERROR Shr reference with non-reactivatable tag: Location should be frozen +} + +fn main() { + foo(&mut (1, 2)); +} diff --git a/tests/compile-fail/stacked_borrows/shared_confusion.rs b/tests/compile-fail/stacked_borrows/shared_confusion.rs index 584053f593..e3e4c4da77 100644 --- a/tests/compile-fail/stacked_borrows/shared_confusion.rs +++ b/tests/compile-fail/stacked_borrows/shared_confusion.rs @@ -13,7 +13,7 @@ fn test(r: &mut RefCell) { } // Our old raw should be dead by now unsafe { *x_evil = 0; } // this falls back to some Raw higher up the stack - *x_inner = 12; //~ ERROR does not exist on the stack + *x_inner = 12; //~ ERROR Mut reference with non-reactivatable tag } fn main() { diff --git a/tests/compile-fail/stacked_borrows/unescaped_local.rs b/tests/compile-fail/stacked_borrows/unescaped_local.rs new file mode 100644 index 0000000000..8627cc44c2 --- /dev/null +++ b/tests/compile-fail/stacked_borrows/unescaped_local.rs @@ -0,0 +1,10 @@ +use std::mem; + +// Make sure we cannot use raw ptrs to access a local that +// has never been escaped to the raw world. 
+fn main() { + let mut x = 42; + let ptr = &mut x; + let raw: *mut i32 = unsafe { mem::transmute(ptr) }; + unsafe { *raw = 13; } //~ ERROR does not exist on the stack +} diff --git a/tests/compile-fail/static_memory_modification.rs b/tests/compile-fail/static_memory_modification.rs index 304ab6c6b7..c758926458 100644 --- a/tests/compile-fail/static_memory_modification.rs +++ b/tests/compile-fail/static_memory_modification.rs @@ -1,3 +1,6 @@ +// Validation detects that we are casting & to &mut and so it changes why we fail +// compile-flags: -Zmir-emit-validate=0 -Zmiri-disable-validation + static X: usize = 5; #[allow(mutable_transmutes)] diff --git a/tests/compile-fail/static_memory_modification2.rs b/tests/compile-fail/static_memory_modification2.rs index 01c3b9bb2d..c9857b2059 100644 --- a/tests/compile-fail/static_memory_modification2.rs +++ b/tests/compile-fail/static_memory_modification2.rs @@ -1,5 +1,5 @@ // Validation detects that we are casting & to &mut and so it changes why we fail -// compile-flags: -Zmir-emit-validate=0 +// compile-flags: -Zmir-emit-validate=0 -Zmiri-disable-validation use std::mem::transmute; diff --git a/tests/compile-fail/static_memory_modification3.rs b/tests/compile-fail/static_memory_modification3.rs index ff09aad1bd..41a6278729 100644 --- a/tests/compile-fail/static_memory_modification3.rs +++ b/tests/compile-fail/static_memory_modification3.rs @@ -1,3 +1,6 @@ +// Validation detects that we are casting & to &mut and so it changes why we fail +// compile-flags: -Zmir-emit-validate=0 -Zmiri-disable-validation + use std::mem::transmute; #[allow(mutable_transmutes)] diff --git a/tests/compile-fail/unaligned_ptr_cast.rs b/tests/compile-fail/unaligned_ptr_cast.rs index 88285dc69f..47317afd36 100644 --- a/tests/compile-fail/unaligned_ptr_cast.rs +++ b/tests/compile-fail/unaligned_ptr_cast.rs @@ -1,3 +1,6 @@ +// This should fail even without validation +// compile-flags: -Zmir-emit-validate=0 -Zmiri-disable-validation + fn main() { let x = &2u16; let x = x as *const _ as *const u32; diff --git a/tests/compile-fail/unaligned_ptr_cast2.rs b/tests/compile-fail/unaligned_ptr_cast2.rs index 7541079def..d146f21dfe 100644 --- a/tests/compile-fail/unaligned_ptr_cast2.rs +++ b/tests/compile-fail/unaligned_ptr_cast2.rs @@ -1,3 +1,6 @@ +// This should fail even without validation +// compile-flags: -Zmir-emit-validate=0 -Zmiri-disable-validation + fn main() { let x = &2u16; let x = x as *const _ as *const *const u8; diff --git a/tests/compile-fail/validity/dangling_ref1.rs b/tests/compile-fail/validity/dangling_ref1.rs new file mode 100644 index 0000000000..c5845cb693 --- /dev/null +++ b/tests/compile-fail/validity/dangling_ref1.rs @@ -0,0 +1,5 @@ +use std::mem; + +fn main() { + let _x: &i32 = unsafe { mem::transmute(16usize) }; //~ ERROR tried to interpret some bytes as a pointer +} diff --git a/tests/compile-fail/validity/dangling_ref2.rs b/tests/compile-fail/validity/dangling_ref2.rs new file mode 100644 index 0000000000..21650ebf95 --- /dev/null +++ b/tests/compile-fail/validity/dangling_ref2.rs @@ -0,0 +1,7 @@ +use std::mem; + +fn main() { + let val = 14; + let ptr = (&val as *const i32).wrapping_offset(1); + let _x: &i32 = unsafe { mem::transmute(ptr) }; //~ ERROR outside bounds of allocation +} diff --git a/tests/compile-fail/validity/undef.rs b/tests/compile-fail/validity/undef.rs index 58d3926dad..f86fef9454 100644 --- a/tests/compile-fail/validity/undef.rs +++ b/tests/compile-fail/validity/undef.rs @@ -1,5 +1,5 @@ #![allow(unused_variables)] -// error-pattern: 
encountered undefined data in pointer +// error-pattern: encountered undefined address in pointer use std::mem; diff --git a/tests/compile-fail/zst.rs b/tests/compile-fail/zst.rs index 2b179dcc8a..544d65a1bf 100644 --- a/tests/compile-fail/zst.rs +++ b/tests/compile-fail/zst.rs @@ -1,4 +1,4 @@ fn main() { let x = &() as *const () as *const i32; - let _ = unsafe { *x }; //~ ERROR tried to access memory with alignment 1, but alignment 4 is required + let _ = unsafe { *x }; //~ ERROR access memory with alignment 1, but alignment 4 is required } diff --git a/tests/compiletest.rs b/tests/compiletest.rs index 0e6fa3d2a1..0b2058597e 100644 --- a/tests/compiletest.rs +++ b/tests/compiletest.rs @@ -37,7 +37,7 @@ fn have_fullmir() -> bool { std::env::var("MIRI_SYSROOT").is_ok() || rustc_test_suite().is_some() } -fn compile_fail(sysroot: &Path, path: &str, target: &str, host: &str, need_fullmir: bool) { +fn compile_fail(sysroot: &Path, path: &str, target: &str, host: &str, need_fullmir: bool, opt: bool) { if need_fullmir && !have_fullmir() { eprintln!("{}", format!( "## Skipping compile-fail tests in {} against miri for target {} due to missing mir", path, target ).yellow().bold()); return; } + let opt_str = if opt { " with optimizations" } else { "" }; eprintln!("{}", format!( - "## Running compile-fail tests in {} against miri for target {}", path, - target + "## Running compile-fail tests in {} against miri for target {}{}", path, + target, + opt_str ).green().bold()); + + let mut flags = Vec::new(); + flags.push(format!("--sysroot {}", sysroot.display())); + flags.push("-Dwarnings -Dunused".to_owned()); // overwrite the -Aunused in compiletest-rs + flags.push("-Zmir-emit-validate=1".to_owned()); + if opt { + // Optimizing too aggressively makes UB detection harder, but test at least + // the default value.
diff --git a/tests/compiletest.rs b/tests/compiletest.rs
index 0e6fa3d2a1..0b2058597e 100644
--- a/tests/compiletest.rs
+++ b/tests/compiletest.rs
@@ -37,7 +37,7 @@ fn have_fullmir() -> bool {
     std::env::var("MIRI_SYSROOT").is_ok() || rustc_test_suite().is_some()
 }

-fn compile_fail(sysroot: &Path, path: &str, target: &str, host: &str, need_fullmir: bool) {
+fn compile_fail(sysroot: &Path, path: &str, target: &str, host: &str, need_fullmir: bool, opt: bool) {
     if need_fullmir && !have_fullmir() {
         eprintln!("{}", format!(
             "## Skipping compile-fail tests in {} against miri for target {} due to missing mir",
@@ -47,24 +47,34 @@
         return;
     }

+    let opt_str = if opt { " with optimizations" } else { "" };
     eprintln!("{}", format!(
-        "## Running compile-fail tests in {} against miri for target {}",
+        "## Running compile-fail tests in {} against miri for target {}{}",
         path,
-        target
+        target,
+        opt_str
     ).green().bold());
+
+    let mut flags = Vec::new();
+    flags.push(format!("--sysroot {}", sysroot.display()));
+    flags.push("-Dwarnings -Dunused".to_owned()); // overwrite the -Aunused in compiletest-rs
+    flags.push("-Zmir-emit-validate=1".to_owned());
+    if opt {
+        // Optimizing too aggressively makes UB detection harder, but test at least
+        // the default value.
+        flags.push("-Zmir-opt-level=1".to_owned());
+    } else {
+        flags.push("-Zmir-opt-level=0".to_owned());
+    }
+
     let mut config = compiletest::Config::default().tempdir();
     config.mode = "compile-fail".parse().expect("Invalid mode");
     config.rustc_path = miri_path();
-    let mut flags = Vec::new();
     if rustc_test_suite().is_some() {
         config.run_lib_path = rustc_lib_path();
         config.compile_lib_path = rustc_lib_path();
     }
-    flags.push(format!("--sysroot {}", sysroot.display()));
-    flags.push("-Dwarnings -Dunused".to_owned()); // overwrite the -Aunused in compiletest-rs
     config.src_base = PathBuf::from(path.to_string());
-    flags.push("-Zmir-opt-level=0".to_owned()); // optimization circumvents some stacked borrow checks
-    flags.push("-Zmir-emit-validate=1".to_owned());
     config.target_rustcflags = Some(flags.join(" "));
     config.target = target.to_owned();
     config.host = host.to_owned();
@@ -88,6 +98,17 @@ fn miri_pass(sysroot: &Path, path: &str, target: &str, host: &str, need_fullmir:
         target,
         opt_str
     ).green().bold());
+
+    let mut flags = Vec::new();
+    flags.push(format!("--sysroot {}", sysroot.display()));
+    flags.push("-Dwarnings -Dunused".to_owned()); // overwrite the -Aunused in compiletest-rs
+    flags.push("-Zmir-emit-validate=1".to_owned());
+    if opt {
+        flags.push("-Zmir-opt-level=3".to_owned());
+    } else {
+        flags.push("-Zmir-opt-level=0".to_owned());
+    }
+
     let mut config = compiletest::Config::default().tempdir();
     config.mode = "ui".parse().expect("Invalid mode");
     config.src_base = PathBuf::from(path);
@@ -98,16 +119,6 @@ fn miri_pass(sysroot: &Path, path: &str, target: &str, host: &str, need_fullmir:
         config.run_lib_path = rustc_lib_path();
         config.compile_lib_path = rustc_lib_path();
     }
-    let mut flags = Vec::new();
-    flags.push(format!("--sysroot {}", sysroot.display()));
-    flags.push("-Dwarnings -Dunused".to_owned()); // overwrite the -Aunused in compiletest-rs
-    if opt {
-        flags.push("-Zmir-opt-level=3".to_owned());
-    } else {
-        flags.push("-Zmir-opt-level=0".to_owned());
-        // For now, only validate without optimizations. Inlining breaks validation.
-        flags.push("-Zmir-emit-validate=1".to_owned());
-    }
     config.target_rustcflags = Some(flags.join(" "));
     compiletest::run_tests(&config);
 }
@@ -169,13 +180,13 @@ fn run_pass_miri(opt: bool) {
     miri_pass(&sysroot, "tests/run-pass-fullmir", &host, &host, true, opt);
 }

-fn compile_fail_miri() {
+fn compile_fail_miri(opt: bool) {
     let sysroot = get_sysroot();
     let host = get_host();

     // FIXME: run tests for other targets, too
-    compile_fail(&sysroot, "tests/compile-fail", &host, &host, false);
-    compile_fail(&sysroot, "tests/compile-fail-fullmir", &host, &host, true);
+    compile_fail(&sysroot, "tests/compile-fail", &host, &host, false, opt);
+    compile_fail(&sysroot, "tests/compile-fail-fullmir", &host, &host, true, opt);
 }

 #[test]
@@ -187,5 +198,6 @@ fn test() {
     run_pass_miri(false);
     run_pass_miri(true);

-    compile_fail_miri();
+    compile_fail_miri(false);
+    compile_fail_miri(true);
 }
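After this change, compile_fail and miri_pass build their flag lists with nearly identical code; they differ only in the -Zmir-opt-level chosen when opt is set (1 for compile-fail, since aggressive optimization hides UB, versus 3 for run-pass). A hypothetical helper, sketched below with an invented name base_flags (not part of this patch), shows one way a follow-up could deduplicate this:

    use std::path::Path;

    // Hypothetical: shared flag setup for both test-harness entry points.
    fn base_flags(sysroot: &Path, opt_level: u32) -> Vec<String> {
        vec![
            format!("--sysroot {}", sysroot.display()),
            // overwrite the -Aunused in compiletest-rs
            "-Dwarnings -Dunused".to_owned(),
            "-Zmir-emit-validate=1".to_owned(),
            format!("-Zmir-opt-level={}", opt_level),
        ]
    }

    fn main() {
        // compile_fail would pass opt levels 0/1 here, miri_pass 0/3.
        let flags = base_flags(Path::new("/tmp/sysroot"), 1);
        assert!(flags.join(" ").contains("-Zmir-opt-level=1"));
    }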
diff --git a/tests/run-pass-fullmir/integer-ops.rs b/tests/run-pass-fullmir/integer-ops.rs
index 7a2335c829..0264099eb6 100644
--- a/tests/run-pass-fullmir/integer-ops.rs
+++ b/tests/run-pass-fullmir/integer-ops.rs
@@ -8,9 +8,6 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.

-// FIXME: remove -Zmir-opt-level once https://github.com/rust-lang/rust/issues/43359 is fixed
-// compile-flags: -Zmir-opt-level=0
-
 use std::i32;

 pub fn main() {
diff --git a/tests/run-pass-fullmir/send-is-not-static-par-for.rs b/tests/run-pass-fullmir/send-is-not-static-par-for.rs
index 1b913aed4c..282f7a3595 100644
--- a/tests/run-pass-fullmir/send-is-not-static-par-for.rs
+++ b/tests/run-pass-fullmir/send-is-not-static-par-for.rs
@@ -8,6 +8,9 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.

+// FIXME: Still investigating whether there is UB here
+// compile-flags: -Zmiri-disable-validation
+
 use std::sync::Mutex;

 fn par_for<I, F>(iter: I, f: F)
diff --git a/tests/run-pass-fullmir/u128.rs b/tests/run-pass-fullmir/u128.rs
index ca33bd5f9e..0f1b6e8b58 100644
--- a/tests/run-pass-fullmir/u128.rs
+++ b/tests/run-pass-fullmir/u128.rs
@@ -8,6 +8,9 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.

+// FIXME: Still investigating whether there is UB here
+// compile-flags: -Zmiri-disable-validation
+
 fn b<T>(t: T) -> T { t }

 fn main() {
diff --git a/tests/run-pass-fullmir/vecdeque.rs b/tests/run-pass-fullmir/vecdeque.rs
index 381169505e..ffbb116684 100644
--- a/tests/run-pass-fullmir/vecdeque.rs
+++ b/tests/run-pass-fullmir/vecdeque.rs
@@ -1,3 +1,6 @@
+// FIXME: Still investigating whether there is UB here
+// compile-flags: -Zmiri-disable-validation
+
 use std::collections::VecDeque;

 fn main() {
diff --git a/tests/run-pass/deref_partially_dangling_raw.rs b/tests/run-pass/deref_partially_dangling_raw.rs
new file mode 100644
index 0000000000..8639a28936
--- /dev/null
+++ b/tests/run-pass/deref_partially_dangling_raw.rs
@@ -0,0 +1,9 @@
+// Deref a raw ptr to access a field of a large struct, where the field
+// is allocated but the entire struct is not.
+// For now, we want to allow this.
+
+fn main() {
+    let x = (1, 1);
+    let xptr = &x as *const _ as *const (i32, i32, i32);
+    let _val = unsafe { (*xptr).1 };
+}
diff --git a/tests/run-pass/observed_local_mut.rs b/tests/run-pass/observed_local_mut.rs
index a4ecf1e635..c58e8c84bb 100644
--- a/tests/run-pass/observed_local_mut.rs
+++ b/tests/run-pass/observed_local_mut.rs
@@ -1,3 +1,6 @@
+// Validation catches this (correctly) as UB.
+// compile-flags: -Zmiri-disable-validation
+
 // This test is intended to guard against the problem described in commit
 // 39bb1254d1eaf74f45a4e741097e33fc942168d5.
 //
diff --git a/tests/run-pass/ptr_arith_offset_overflow.rs b/tests/run-pass/ptr_arith_offset_overflow.rs
index 3383c3b801..6b778248be 100644
--- a/tests/run-pass/ptr_arith_offset_overflow.rs
+++ b/tests/run-pass/ptr_arith_offset_overflow.rs
@@ -1,6 +1,6 @@
 fn main() {
     let v = [1i16, 2];
-    let x = &v[1] as *const i16;
+    let x = v.as_ptr().wrapping_offset(1); // ptr to the 2nd element
     // Adding 2*isize::max and then 1 is like subtracting 1
     let x = x.wrapping_offset(isize::max_value());
     let x = x.wrapping_offset(isize::max_value());