diff --git a/src/librustc_mir/const_eval/eval_queries.rs b/src/librustc_mir/const_eval/eval_queries.rs
index dbeb75b60c290..53f3b539bdaa0 100644
--- a/src/librustc_mir/const_eval/eval_queries.rs
+++ b/src/librustc_mir/const_eval/eval_queries.rs
@@ -115,28 +115,31 @@ pub(super) fn op_to_const<'tcx>(
         // by-val is if we are in const_field, i.e., if this is (a field of) something that we
         // "tried to make immediate" before. We wouldn't do that for non-slice scalar pairs or
         // structs containing such.
-        op.try_as_mplace()
+        op.try_as_mplace(ecx)
     };
-    let val = match immediate {
-        Ok(mplace) => {
-            let ptr = mplace.ptr.assert_ptr();
+
+    let to_const_value = |mplace: MPlaceTy<'_>| match mplace.ptr {
+        Scalar::Ptr(ptr) => {
             let alloc = ecx.tcx.alloc_map.lock().unwrap_memory(ptr.alloc_id);
             ConstValue::ByRef { alloc, offset: ptr.offset }
         }
+        Scalar::Raw { data, .. } => {
+            assert!(mplace.layout.is_zst());
+            assert_eq!(
+                data,
+                mplace.layout.align.abi.bytes().into(),
+                "this MPlaceTy must come from `try_as_mplace` being used on a zst, so we know what
+                value this integer address must have",
+            );
+            ConstValue::Scalar(Scalar::zst())
+        }
+    };
+    let val = match immediate {
+        Ok(mplace) => to_const_value(mplace),
         // see comment on `let try_as_immediate` above
         Err(ImmTy { imm: Immediate::Scalar(x), .. }) => match x {
             ScalarMaybeUndef::Scalar(s) => ConstValue::Scalar(s),
-            ScalarMaybeUndef::Undef => {
-                // When coming out of "normal CTFE", we'll always have an `Indirect` operand as
-                // argument and we will not need this. The only way we can already have an
-                // `Immediate` is when we are called from `const_field`, and that `Immediate`
-                // comes from a constant so it can happen have `Undef`, because the indirect
-                // memory that was read had undefined bytes.
-                let mplace = op.assert_mem_place();
-                let ptr = mplace.ptr.assert_ptr();
-                let alloc = ecx.tcx.alloc_map.lock().unwrap_memory(ptr.alloc_id);
-                ConstValue::ByRef { alloc, offset: ptr.offset }
-            }
+            ScalarMaybeUndef::Undef => to_const_value(op.assert_mem_place(ecx)),
         },
         Err(ImmTy { imm: Immediate::ScalarPair(a, b), .. }) => {
             let (data, start) = match a.not_undef().unwrap() {
diff --git a/src/librustc_mir/interpret/eval_context.rs b/src/librustc_mir/interpret/eval_context.rs
index 551e3e837c988..864f4f9487c88 100644
--- a/src/librustc_mir/interpret/eval_context.rs
+++ b/src/librustc_mir/interpret/eval_context.rs
@@ -20,7 +20,7 @@ use rustc_macros::HashStable;
 use rustc_span::source_map::{self, Span, DUMMY_SP};
 
 use super::{
-    Immediate, MPlaceTy, Machine, MemPlace, Memory, OpTy, Operand, Place, PlaceTy,
+    Immediate, MPlaceTy, Machine, MemPlace, MemPlaceMeta, Memory, OpTy, Operand, Place, PlaceTy,
     ScalarMaybeUndef, StackPopInfo,
 };
 
@@ -393,7 +393,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     /// This can fail to provide an answer for extern types.
     pub(super) fn size_and_align_of(
         &self,
-        metadata: Option<Scalar<M::PointerTag>>,
+        metadata: MemPlaceMeta<M::PointerTag>,
         layout: TyLayout<'tcx>,
     ) -> InterpResult<'tcx, Option<(Size, Align)>> {
         if !layout.is_unsized() {
@@ -465,14 +465,13 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 Ok(Some((size, align)))
             }
             ty::Dynamic(..) => {
-                let vtable = metadata.expect("dyn trait fat ptr must have vtable");
+                let vtable = metadata.unwrap_meta();
                 // Read size and align from vtable (already checks size).
                 Ok(Some(self.read_size_and_align_from_vtable(vtable)?))
             }
             ty::Slice(_) | ty::Str => {
-                let len =
-                    metadata.expect("slice fat ptr must have length").to_machine_usize(self)?;
+                let len = metadata.unwrap_meta().to_machine_usize(self)?;
                 let elem = layout.field(self, 0)?;
 
                 // Make sure the slice is not too big.
@@ -818,8 +817,8 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                         " by align({}){} ref:",
                         mplace.align.bytes(),
                         match mplace.meta {
-                            Some(meta) => format!(" meta({:?})", meta),
-                            None => String::new(),
+                            MemPlaceMeta::Meta(meta) => format!(" meta({:?})", meta),
+                            MemPlaceMeta::Poison | MemPlaceMeta::None => String::new(),
                         }
                     )
                     .unwrap();
diff --git a/src/librustc_mir/interpret/intern.rs b/src/librustc_mir/interpret/intern.rs
index 7c6129ef30ffd..9b3a2fa36f794 100644
--- a/src/librustc_mir/interpret/intern.rs
+++ b/src/librustc_mir/interpret/intern.rs
@@ -193,7 +193,7 @@ impl<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx
             {
                 // Validation has already errored on an invalid vtable pointer so we can safely not
                 // do anything if this is not a real pointer.
-                if let Scalar::Ptr(vtable) = mplace.meta.unwrap() {
+                if let Scalar::Ptr(vtable) = mplace.meta.unwrap_meta() {
                     // Explicitly choose `Immutable` here, since vtables are immutable, even
                     // if the reference of the fat pointer is mutable.
                     self.intern_shallow(vtable.alloc_id, Mutability::Not, None)?;
@@ -226,7 +226,8 @@ impl<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx
                 {
                     | (InternMode::Const, hir::Mutability::Mut) => match referenced_ty.kind {
                         ty::Array(_, n) if n.eval_usize(self.ecx.tcx.tcx, self.ecx.param_env) == 0 => {}
-                        ty::Slice(_) if mplace.meta.unwrap().to_machine_usize(self.ecx)? == 0 => {}
+                        ty::Slice(_)
+                            if mplace.meta.unwrap_meta().to_machine_usize(self.ecx)? == 0 => {}
                         _ => bug!("const qualif failed to prevent mutable references"),
                     },
                 }
diff --git a/src/librustc_mir/interpret/mod.rs b/src/librustc_mir/interpret/mod.rs
index 0d35eae6ed08d..2e8fbb95ca2e5 100644
--- a/src/librustc_mir/interpret/mod.rs
+++ b/src/librustc_mir/interpret/mod.rs
@@ -20,7 +20,7 @@ pub use rustc::mir::interpret::*; // have all the `interpret` symbols in one pla
 
 pub use self::eval_context::{Frame, InterpCx, LocalState, LocalValue, StackPopCleanup};
 
-pub use self::place::{MPlaceTy, MemPlace, Place, PlaceTy};
+pub use self::place::{MPlaceTy, MemPlace, MemPlaceMeta, Place, PlaceTy};
 
 pub use self::memory::{AllocCheck, FnVal, Memory, MemoryKind};
diff --git a/src/librustc_mir/interpret/operand.rs b/src/librustc_mir/interpret/operand.rs
index def979b63b52a..ddd9776e89383 100644
--- a/src/librustc_mir/interpret/operand.rs
+++ b/src/librustc_mir/interpret/operand.rs
@@ -153,30 +153,6 @@ pub enum Operand<Tag = (), Id = AllocId> {
     Indirect(MemPlace<Tag, Id>),
 }
 
-impl<Tag> Operand<Tag> {
-    #[inline]
-    pub fn assert_mem_place(self) -> MemPlace<Tag>
-    where
-        Tag: ::std::fmt::Debug,
-    {
-        match self {
-            Operand::Indirect(mplace) => mplace,
-            _ => bug!("assert_mem_place: expected Operand::Indirect, got {:?}", self),
-        }
-    }
-
-    #[inline]
-    pub fn assert_immediate(self) -> Immediate<Tag>
-    where
-        Tag: ::std::fmt::Debug,
-    {
-        match self {
-            Operand::Immediate(imm) => imm,
-            _ => bug!("assert_immediate: expected Operand::Immediate, got {:?}", self),
-        }
-    }
-}
-
 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
 pub struct OpTy<'tcx, Tag = ()> {
     op: Operand<Tag>, // Keep this private; it helps enforce invariants.
@@ -267,7 +243,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         &self,
         op: OpTy<'tcx, M::PointerTag>,
     ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
-        match op.try_as_mplace() {
+        match op.try_as_mplace(self) {
             Ok(mplace) => Ok(self.force_mplace_ptr(mplace)?.into()),
             Err(imm) => Ok(imm.into()), // Nothing to cast/force
         }
@@ -335,7 +311,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         &self,
         src: OpTy<'tcx, M::PointerTag>,
     ) -> InterpResult<'tcx, Result<ImmTy<'tcx, M::PointerTag>, MPlaceTy<'tcx, M::PointerTag>>> {
-        Ok(match src.try_as_mplace() {
+        Ok(match src.try_as_mplace(self) {
             Ok(mplace) => {
                 if let Some(val) = self.try_read_immediate_from_mplace(mplace)? {
                     Ok(val)
@@ -383,7 +359,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         op: OpTy<'tcx, M::PointerTag>,
         field: u64,
     ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
-        let base = match op.try_as_mplace() {
+        let base = match op.try_as_mplace(self) {
             Ok(mplace) => {
                 // The easy case
                 let field = self.mplace_field(mplace, field)?;
@@ -420,7 +396,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         variant: VariantIdx,
     ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
         // Downcasts only change the layout
-        Ok(match op.try_as_mplace() {
+        Ok(match op.try_as_mplace(self) {
             Ok(mplace) => self.mplace_downcast(mplace, variant)?.into(),
             Err(..) => {
                 let layout = op.layout.for_variant(self, variant);
@@ -439,30 +415,10 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             Field(field, _) => self.operand_field(base, field.index() as u64)?,
             Downcast(_, variant) => self.operand_downcast(base, variant)?,
             Deref => self.deref_operand(base)?.into(),
-            ConstantIndex { .. } | Index(_) if base.layout.is_zst() => {
-                OpTy {
-                    op: Operand::Immediate(Scalar::zst().into()),
-                    // the actual index doesn't matter, so we just pick a convenient one like 0
-                    layout: base.layout.field(self, 0)?,
-                }
-            }
-            Subslice { from, to, from_end } if base.layout.is_zst() => {
-                let elem_ty = if let ty::Array(elem_ty, _) = base.layout.ty.kind {
-                    elem_ty
-                } else {
-                    bug!("slices shouldn't be zero-sized");
-                };
-                assert!(!from_end, "arrays shouldn't be subsliced from the end");
-
-                OpTy {
-                    op: Operand::Immediate(Scalar::zst().into()),
-                    layout: self.layout_of(self.tcx.mk_array(elem_ty, (to - from) as u64))?,
-                }
-            }
             Subslice { .. } | ConstantIndex { .. } | Index(_) => {
                 // The rest should only occur as mplace, we do not use Immediates for types
                 // allowing such operations. This matches place_projection forcing an allocation.
-                let mplace = base.assert_mem_place();
+                let mplace = base.assert_mem_place(self);
                 self.mplace_projection(mplace, proj_elem)?.into()
             }
         })
diff --git a/src/librustc_mir/interpret/place.rs b/src/librustc_mir/interpret/place.rs
index f4ac7de852af0..890627a54543a 100644
--- a/src/librustc_mir/interpret/place.rs
+++ b/src/librustc_mir/interpret/place.rs
@@ -20,6 +20,47 @@ use super::{
     RawConst, Scalar, ScalarMaybeUndef,
 };
 
+#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable)]
+/// Information required for the sound usage of a `MemPlace`.
+pub enum MemPlaceMeta<Tag = (), Id = AllocId> {
+    /// The unsized payload (e.g. length for slices or vtable pointer for trait objects).
+    Meta(Scalar<Tag, Id>),
+    /// `Sized` types or unsized `extern type`
+    None,
+    /// The address of this place may not be taken. This protects the `MemPlace` from coming from
+    /// a ZST Operand with a backing allocation and being converted to an integer address. This
+    /// should be impossible, because you can't take the address of an operand, but this is a second
+    /// protection layer ensuring that we don't mess up.
+    Poison,
+}
+
+impl<Tag, Id> MemPlaceMeta<Tag, Id> {
+    pub fn unwrap_meta(self) -> Scalar<Tag, Id> {
+        match self {
+            Self::Meta(s) => s,
+            Self::None | Self::Poison => {
+                bug!("expected wide pointer extra data (e.g. slice length or trait object vtable)")
+            }
+        }
+    }
+    fn has_meta(self) -> bool {
+        match self {
+            Self::Meta(_) => true,
+            Self::None | Self::Poison => false,
+        }
+    }
+}
+
+impl<Tag> MemPlaceMeta<Tag> {
+    pub fn erase_tag(self) -> MemPlaceMeta<()> {
+        match self {
+            Self::Meta(s) => MemPlaceMeta::Meta(s.erase_tag()),
+            Self::None => MemPlaceMeta::None,
+            Self::Poison => MemPlaceMeta::Poison,
+        }
+    }
+}
+
 #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable)]
 pub struct MemPlace<Tag = (), Id = AllocId> {
     /// A place may have an integral pointer for ZSTs, and since it might
@@ -30,7 +71,7 @@ pub struct MemPlace<Tag = (), Id = AllocId> {
     /// Metadata for unsized places. Interpretation is up to the type.
     /// Must not be present for sized types, but can be missing for unsized types
     /// (e.g., `extern type`).
-    pub meta: Option<Scalar<Tag, Id>>,
+    pub meta: MemPlaceMeta<Tag, Id>,
 }
 
 #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable)]
@@ -88,21 +129,17 @@ impl<Tag> MemPlace<Tag> {
     #[inline]
     pub fn erase_tag(self) -> MemPlace {
-        MemPlace {
-            ptr: self.ptr.erase_tag(),
-            align: self.align,
-            meta: self.meta.map(Scalar::erase_tag),
-        }
+        MemPlace { ptr: self.ptr.erase_tag(), align: self.align, meta: self.meta.erase_tag() }
     }
 
     #[inline(always)]
-    pub fn from_scalar_ptr(ptr: Scalar<Tag>, align: Align) -> Self {
-        MemPlace { ptr, align, meta: None }
+    fn from_scalar_ptr(ptr: Scalar<Tag>, align: Align) -> Self {
+        MemPlace { ptr, align, meta: MemPlaceMeta::None }
     }
 
     /// Produces a Place that will error if attempted to be read from or written to
     #[inline(always)]
-    pub fn null(cx: &impl HasDataLayout) -> Self {
+    fn null(cx: &impl HasDataLayout) -> Self {
         Self::from_scalar_ptr(Scalar::ptr_null(cx), Align::from_bytes(1).unwrap())
     }
 
@@ -116,15 +153,19 @@ impl<Tag> MemPlace<Tag> {
     #[inline(always)]
     pub fn to_ref(self) -> Immediate<Tag> {
         match self.meta {
-            None => Immediate::Scalar(self.ptr.into()),
-            Some(meta) => Immediate::ScalarPair(self.ptr.into(), meta.into()),
+            MemPlaceMeta::None => Immediate::Scalar(self.ptr.into()),
+            MemPlaceMeta::Meta(meta) => Immediate::ScalarPair(self.ptr.into(), meta.into()),
+            MemPlaceMeta::Poison => bug!(
+                "MPlaceTy::dangling may never be used to produce a \
+                place that will have the address of its pointee taken"
+            ),
         }
     }
 
     pub fn offset(
         self,
         offset: Size,
-        meta: Option<Scalar<Tag>>,
+        meta: MemPlaceMeta<Tag>,
         cx: &impl HasDataLayout,
     ) -> InterpResult<'tcx, Self> {
         Ok(MemPlace {
@@ -139,13 +180,10 @@ impl<'tcx, Tag> MPlaceTy<'tcx, Tag> {
     /// Produces a MemPlace that works for ZST but nothing else
     #[inline]
     pub fn dangling(layout: TyLayout<'tcx>, cx: &impl HasDataLayout) -> Self {
-        MPlaceTy {
-            mplace: MemPlace::from_scalar_ptr(
-                Scalar::from_uint(layout.align.abi.bytes(), cx.pointer_size()),
-                layout.align.abi,
-            ),
-            layout,
-        }
+        let align = layout.align.abi;
+        let ptr = Scalar::from_uint(align.bytes(), cx.pointer_size());
+        // `Poison` this to make sure that the pointer value `ptr` is never observable by the program.
+        MPlaceTy { mplace: MemPlace { ptr, align, meta: MemPlaceMeta::Poison }, layout }
     }
 
     /// Replace ptr tag, maintain vtable tag (if any)
@@ -158,7 +196,7 @@ impl<'tcx, Tag> MPlaceTy<'tcx, Tag> {
     pub fn offset(
         self,
         offset: Size,
-        meta: Option<Scalar<Tag>>,
+        meta: MemPlaceMeta<Tag>,
         layout: TyLayout<'tcx>,
         cx: &impl HasDataLayout,
     ) -> InterpResult<'tcx, Self> {
@@ -175,7 +213,9 @@ impl<'tcx, Tag> MPlaceTy<'tcx, Tag> {
         if self.layout.is_unsized() {
             // We need to consult `meta` metadata
             match self.layout.ty.kind {
-                ty::Slice(..) | ty::Str => return self.mplace.meta.unwrap().to_machine_usize(cx),
+                ty::Slice(..) | ty::Str => {
+                    return self.mplace.meta.unwrap_meta().to_machine_usize(cx);
+                }
                 _ => bug!("len not supported on unsized type {:?}", self.layout.ty),
             }
         } else {
@@ -191,7 +231,7 @@ impl<'tcx, Tag> MPlaceTy<'tcx, Tag> {
     #[inline]
     pub(super) fn vtable(self) -> Scalar<Tag> {
         match self.layout.ty.kind {
-            ty::Dynamic(..) => self.mplace.meta.unwrap(),
+            ty::Dynamic(..) => self.mplace.meta.unwrap_meta(),
             _ => bug!("vtable not supported on type {:?}", self.layout.ty),
         }
     }
@@ -200,36 +240,36 @@ impl<'tcx, Tag: ::std::fmt::Debug + Copy> OpTy<'tcx, Tag> {
 // These are defined here because they produce a place.
 impl<'tcx, Tag: ::std::fmt::Debug + Copy> OpTy<'tcx, Tag> {
     #[inline(always)]
-    pub fn try_as_mplace(self) -> Result<MPlaceTy<'tcx, Tag>, ImmTy<'tcx, Tag>> {
+    /// Note: do not call `as_ref` on the resulting place. This function should only be used to
+    /// read from the resulting mplace, not to get its address back.
+    pub fn try_as_mplace(
+        self,
+        cx: &impl HasDataLayout,
+    ) -> Result<MPlaceTy<'tcx, Tag>, ImmTy<'tcx, Tag>> {
         match *self {
             Operand::Indirect(mplace) => Ok(MPlaceTy { mplace, layout: self.layout }),
+            Operand::Immediate(_) if self.layout.is_zst() => {
+                Ok(MPlaceTy::dangling(self.layout, cx))
+            }
             Operand::Immediate(imm) => Err(ImmTy { imm, layout: self.layout }),
         }
     }
 
     #[inline(always)]
-    pub fn assert_mem_place(self) -> MPlaceTy<'tcx, Tag> {
-        self.try_as_mplace().unwrap()
+    /// Note: do not call `as_ref` on the resulting place. This function should only be used to
+    /// read from the resulting mplace, not to get its address back.
+    pub fn assert_mem_place(self, cx: &impl HasDataLayout) -> MPlaceTy<'tcx, Tag> {
+        self.try_as_mplace(cx).unwrap()
     }
 }
 
 impl<Tag: ::std::fmt::Debug> Place<Tag> {
     /// Produces a Place that will error if attempted to be read from or written to
     #[inline(always)]
-    pub fn null(cx: &impl HasDataLayout) -> Self {
+    fn null(cx: &impl HasDataLayout) -> Self {
         Place::Ptr(MemPlace::null(cx))
     }
 
-    #[inline(always)]
-    pub fn from_scalar_ptr(ptr: Scalar<Tag>, align: Align) -> Self {
-        Place::Ptr(MemPlace::from_scalar_ptr(ptr, align))
-    }
-
-    #[inline(always)]
-    pub fn from_ptr(ptr: Pointer<Tag>, align: Align) -> Self {
-        Place::Ptr(MemPlace::from_ptr(ptr, align))
-    }
-
     #[inline]
     pub fn assert_mem_place(self) -> MemPlace<Tag> {
         match self {
@@ -270,8 +310,10 @@ where
             val.layout.ty.builtin_deref(true).expect("`ref_to_mplace` called on non-ptr type").ty;
         let layout = self.layout_of(pointee_type)?;
         let (ptr, meta) = match *val {
-            Immediate::Scalar(ptr) => (ptr.not_undef()?, None),
-            Immediate::ScalarPair(ptr, meta) => (ptr.not_undef()?, Some(meta.not_undef()?)),
+            Immediate::Scalar(ptr) => (ptr.not_undef()?, MemPlaceMeta::None),
+            Immediate::ScalarPair(ptr, meta) => {
+                (ptr.not_undef()?, MemPlaceMeta::Meta(meta.not_undef()?))
+            }
         };
 
         let mplace = MemPlace {
@@ -305,14 +347,14 @@ where
     /// On success, returns `None` for zero-sized accesses (where nothing else is
     /// left to do) and a `Pointer` to use for the actual access otherwise.
     #[inline]
-    pub fn check_mplace_access(
+    pub(super) fn check_mplace_access(
         &self,
         place: MPlaceTy<'tcx, M::PointerTag>,
         size: Option<Size>,
     ) -> InterpResult<'tcx, Option<Pointer<M::PointerTag>>> {
         let size = size.unwrap_or_else(|| {
             assert!(!place.layout.is_unsized());
-            assert!(place.meta.is_none());
+            assert!(!place.meta.has_meta());
             place.layout.size
         });
         self.memory.check_ptr_access(place.ptr, size, place.align)
@@ -338,7 +380,7 @@ where
 
     /// Force `place.ptr` to a `Pointer`.
     /// Can be helpful to avoid lots of `force_ptr` calls later, if this place is used a lot.
-    pub fn force_mplace_ptr(
+    pub(super) fn force_mplace_ptr(
         &self,
         mut place: MPlaceTy<'tcx, M::PointerTag>,
     ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
@@ -405,7 +447,7 @@ where
         } else {
             // base.meta could be present; we might be accessing a sized field of an unsized
             // struct.
-            (None, offset)
+            (MemPlaceMeta::None, offset)
         };
 
         // We do not look at `base.layout.align` nor `field_layout.align`, unlike
@@ -415,7 +457,7 @@ where
 
     // Iterates over all fields of an array. Much more efficient than doing the
     // same by repeatedly calling `mplace_array`.
-    pub fn mplace_array_fields(
+    pub(super) fn mplace_array_fields(
        &self,
        base: MPlaceTy<'tcx, Tag>,
     ) -> InterpResult<'tcx, impl Iterator<Item = InterpResult<'tcx, MPlaceTy<'tcx, Tag>>> + 'tcx>
@@ -427,10 +469,10 @@ where
         };
         let layout = base.layout.field(self, 0)?;
         let dl = &self.tcx.data_layout;
-        Ok((0..len).map(move |i| base.offset(i * stride, None, layout, dl)))
+        Ok((0..len).map(move |i| base.offset(i * stride, MemPlaceMeta::None, layout, dl)))
     }
 
-    pub fn mplace_subslice(
+    fn mplace_subslice(
         &self,
         base: MPlaceTy<'tcx, M::PointerTag>,
         from: u64,
@@ -460,10 +502,10 @@ where
         let (meta, ty) = match base.layout.ty.kind {
             // It is not nice to match on the type, but that seems to be the only way to
             // implement this.
-            ty::Array(inner, _) => (None, self.tcx.mk_array(inner, inner_len)),
+            ty::Array(inner, _) => (MemPlaceMeta::None, self.tcx.mk_array(inner, inner_len)),
             ty::Slice(..) => {
                 let len = Scalar::from_uint(inner_len, self.pointer_size());
-                (Some(len), base.layout.ty)
+                (MemPlaceMeta::Meta(len), base.layout.ty)
             }
             _ => bug!("cannot subslice non-array type: `{:?}`", base.layout.ty),
         };
@@ -471,18 +513,18 @@ where
         base.offset(from_offset, meta, layout, self)
     }
 
-    pub fn mplace_downcast(
+    pub(super) fn mplace_downcast(
         &self,
         base: MPlaceTy<'tcx, M::PointerTag>,
         variant: VariantIdx,
     ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
         // Downcasts only change the layout
-        assert!(base.meta.is_none());
+        assert!(!base.meta.has_meta());
         Ok(MPlaceTy { layout: base.layout.for_variant(self, variant), ..base })
     }
 
     /// Project into an mplace
-    pub fn mplace_projection(
+    pub(super) fn mplace_projection(
         &self,
         base: MPlaceTy<'tcx, M::PointerTag>,
         proj_elem: &mir::PlaceElem<'tcx>,
@@ -971,7 +1013,7 @@ where
     pub fn force_allocation_maybe_sized(
         &mut self,
         place: PlaceTy<'tcx, M::PointerTag>,
-        meta: Option<Scalar<M::PointerTag>>,
+        meta: MemPlaceMeta<M::PointerTag>,
     ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::PointerTag>, Option<Size>)> {
         let (mplace, size) = match place.place {
             Place::Local { frame, local } => {
@@ -1016,7 +1058,7 @@ where
         &mut self,
         place: PlaceTy<'tcx, M::PointerTag>,
     ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
-        Ok(self.force_allocation_maybe_sized(place, None)?.0)
+        Ok(self.force_allocation_maybe_sized(place, MemPlaceMeta::None)?.0)
     }
 
     pub fn allocate(
@@ -1036,8 +1078,11 @@ where
     ) -> MPlaceTy<'tcx, M::PointerTag> {
         let ptr = self.memory.allocate_static_bytes(str.as_bytes(), kind);
         let meta = Scalar::from_uint(str.len() as u128, self.pointer_size());
-        let mplace =
-            MemPlace { ptr: ptr.into(), align: Align::from_bytes(1).unwrap(), meta: Some(meta) };
+        let mplace = MemPlace {
+            ptr: ptr.into(),
+            align: Align::from_bytes(1).unwrap(),
+            meta: MemPlaceMeta::Meta(meta),
+        };
 
         let layout = self.layout_of(self.tcx.mk_static_str()).unwrap();
         MPlaceTy { mplace, layout }
@@ -1145,7 +1190,7 @@ where
             assert_eq!(align, layout.align.abi);
         }
 
-        let mplace = MPlaceTy { mplace: MemPlace { meta: None, ..*mplace }, layout };
+        let mplace = MPlaceTy { mplace: MemPlace { meta: MemPlaceMeta::None, ..*mplace }, layout };
         Ok((instance, mplace))
     }
 }
diff --git a/src/librustc_mir/interpret/snapshot.rs b/src/librustc_mir/interpret/snapshot.rs
index 6790baf31ccb2..a8e67c8f208a9 100644
--- a/src/librustc_mir/interpret/snapshot.rs
+++ b/src/librustc_mir/interpret/snapshot.rs
@@ -23,7 +23,9 @@ use rustc_span::source_map::Span;
 use syntax::ast::Mutability;
 
 use super::eval_context::{LocalState, StackPopCleanup};
-use super::{Frame, Immediate, LocalValue, MemPlace, Memory, Operand, Place, ScalarMaybeUndef};
+use super::{
+    Frame, Immediate, LocalValue, MemPlace, MemPlaceMeta, Memory, Operand, Place, ScalarMaybeUndef,
+};
 use crate::const_eval::CompileTimeInterpreter;
 
 #[derive(Default)]
@@ -205,6 +207,14 @@ impl_snapshot_for!(
     }
 );
 
+impl_snapshot_for!(
+    enum MemPlaceMeta {
+        Meta(s),
+        None,
+        Poison,
+    }
+);
+
 impl_snapshot_for!(struct MemPlace {
     ptr,
     meta,
diff --git a/src/librustc_mir/interpret/terminator.rs b/src/librustc_mir/interpret/terminator.rs
index a28bb539fd070..37dcab512b991 100644
--- a/src/librustc_mir/interpret/terminator.rs
+++ b/src/librustc_mir/interpret/terminator.rs
@@ -378,7 +378,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 }
                 None => {
                     // Unsized self.
-                    args[0].assert_mem_place()
+                    args[0].assert_mem_place(self)
                 }
             };
             // Find and consult vtable
diff --git a/src/librustc_mir/interpret/validity.rs b/src/librustc_mir/interpret/validity.rs
index 7b82bed2e7a61..12e8cb6071d92 100644
--- a/src/librustc_mir/interpret/validity.rs
+++ b/src/librustc_mir/interpret/validity.rs
@@ -16,7 +16,7 @@ use rustc_span::symbol::{sym, Symbol};
 use std::hash::Hash;
 
 use super::{
-    CheckInAllocMsg, GlobalAlloc, InterpCx, InterpResult, MPlaceTy, Machine, OpTy, Scalar,
+    CheckInAllocMsg, GlobalAlloc, InterpCx, InterpResult, MPlaceTy, Machine, MemPlaceMeta, OpTy,
     ValueVisitor,
 };
 
@@ -246,13 +246,13 @@ impl<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, 'tcx, M
     fn check_wide_ptr_meta(
         &mut self,
-        meta: Option<Scalar<M::PointerTag>>,
+        meta: MemPlaceMeta<M::PointerTag>,
         pointee: TyLayout<'tcx>,
     ) -> InterpResult<'tcx> {
         let tail = self.ecx.tcx.struct_tail_erasing_lifetimes(pointee.ty, self.ecx.param_env);
         match tail.kind {
             ty::Dynamic(..) => {
-                let vtable = meta.unwrap();
+                let vtable = meta.unwrap_meta();
                 try_validation!(
                     self.ecx.memory.check_ptr_access(
                         vtable,
@@ -276,7 +276,7 @@ impl<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, 'tcx, M
             }
             ty::Slice(..) | ty::Str => {
                 let _len = try_validation!(
-                    meta.unwrap().to_machine_usize(self.ecx),
+                    meta.unwrap_meta().to_machine_usize(self.ecx),
                     "non-integer slice length in wide pointer",
                     self.path
                 );
@@ -571,7 +571,7 @@ impl<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
     ) -> InterpResult<'tcx> {
         match op.layout.ty.kind {
             ty::Str => {
-                let mplace = op.assert_mem_place(); // strings are never immediate
+                let mplace = op.assert_mem_place(self.ecx); // strings are never immediate
                 try_validation!(
                     self.ecx.read_str(mplace),
                     "uninitialized or non-UTF-8 data in str",
@@ -599,15 +599,11 @@ impl<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
             {
                 // Optimized handling for arrays of integer/float type.
-                // bailing out for zsts is ok, since the array element type can only be int/float
-                if op.layout.is_zst() {
-                    return Ok(());
-                }
-                // non-ZST array cannot be immediate, slices are never immediate
-                let mplace = op.assert_mem_place();
+                // Arrays cannot be immediate, slices are never immediate.
+                let mplace = op.assert_mem_place(self.ecx);
                 // This is the length of the array/slice.
                 let len = mplace.len(self.ecx)?;
-                // zero length slices have nothing to be checked
+                // Zero length slices have nothing to be checked.
                 if len == 0 {
                     return Ok(());
                 }
diff --git a/src/librustc_mir/interpret/visitor.rs b/src/librustc_mir/interpret/visitor.rs
index 2cfcf0ff06d0f..d2594e8707104 100644
--- a/src/librustc_mir/interpret/visitor.rs
+++ b/src/librustc_mir/interpret/visitor.rs
@@ -223,7 +223,7 @@ macro_rules! make_value_visitor {
             match v.layout().ty.kind {
                 ty::Dynamic(..) => {
                     // immediate trait objects are not a thing
-                    let dest = v.to_op(self.ecx())?.assert_mem_place();
+                    let dest = v.to_op(self.ecx())?.assert_mem_place(self.ecx());
                     let inner = self.ecx().unpack_dyn_trait(dest)?.1;
                     trace!("walk_value: dyn object layout: {:#?}", inner.layout);
                     // recurse with the inner type
@@ -292,13 +292,7 @@ macro_rules! make_value_visitor {
                 },
                 layout::FieldPlacement::Array { .. } => {
                     // Let's get an mplace first.
-                    let mplace = if v.layout().is_zst() {
-                        // it's a ZST, the memory content cannot matter
-                        MPlaceTy::dangling(v.layout(), self.ecx())
-                    } else {
-                        // non-ZST array/slice/str cannot be immediate
-                        v.to_op(self.ecx())?.assert_mem_place()
-                    };
+                    let mplace = v.to_op(self.ecx())?.assert_mem_place(self.ecx());
                     // Now we can go over all the fields.
                     let iter = self.ecx().mplace_array_fields(mplace)?
                         .map(|f| f.and_then(|f| {
diff --git a/src/librustc_mir/transform/const_prop.rs b/src/librustc_mir/transform/const_prop.rs
index 6b0f7be86841e..d5d56b36cf4c3 100644
--- a/src/librustc_mir/transform/const_prop.rs
+++ b/src/librustc_mir/transform/const_prop.rs
@@ -707,7 +707,8 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
                 ScalarMaybeUndef::Scalar(r),
             )) => l.is_bits() && r.is_bits(),
             interpret::Operand::Indirect(_) if mir_opt_level >= 2 => {
-                intern_const_alloc_recursive(&mut self.ecx, None, op.assert_mem_place())
+                let mplace = op.assert_mem_place(&self.ecx);
+                intern_const_alloc_recursive(&mut self.ecx, None, mplace)
                     .expect("failed to intern alloc");
                 true
             }
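
Not part of the patch above: a minimal, self-contained sketch of the semantics the new `MemPlaceMeta` variants encode (wide-pointer payload vs. sized place vs. poisoned dangling ZST place). It uses a simplified stand-in `Scalar` type; only the variant names mirror the enum added in place.rs, everything else is illustrative and not rustc code.

// Simplified illustration of the `MemPlaceMeta` idea; the stand-in `Scalar` is hypothetical.
#[derive(Copy, Clone, Debug, PartialEq)]
enum Scalar {
    Raw(u128),
}

#[derive(Copy, Clone, Debug, PartialEq)]
enum MemPlaceMeta {
    /// The unsized payload (e.g. a slice length or a vtable pointer).
    Meta(Scalar),
    /// Sized places (or unsized `extern type`) carry no metadata.
    None,
    /// Dangling ZST places: reading is fine, but their address must never be observed.
    Poison,
}

impl MemPlaceMeta {
    fn unwrap_meta(self) -> Scalar {
        match self {
            Self::Meta(s) => s,
            Self::None | Self::Poison => {
                panic!("expected wide pointer extra data (e.g. slice length or vtable)")
            }
        }
    }
    fn has_meta(self) -> bool {
        matches!(self, Self::Meta(_))
    }
}

fn main() {
    let slice_len = MemPlaceMeta::Meta(Scalar::Raw(3)); // e.g. a `&[u8]` of length 3
    let sized = MemPlaceMeta::None; // e.g. an `i32` place
    let zst = MemPlaceMeta::Poison; // e.g. a dangling `()` place

    assert!(slice_len.has_meta());
    assert_eq!(slice_len.unwrap_meta(), Scalar::Raw(3));
    assert!(!sized.has_meta());
    assert!(!zst.has_meta()); // taking this place's address would be a bug, reading it is not
}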