From c8baac5776141d9e844b05fef8d144e3664e7a75 Mon Sep 17 00:00:00 2001
From: Ralf Jung
Date: Mon, 12 Jul 2021 18:45:26 +0200
Subject: [PATCH 1/2] remove remaining use of Pointer in Allocation API

---
 .../src/mir/interpret/allocation.rs        | 19 +++++++++----------
 compiler/rustc_mir/src/interpret/memory.rs |  4 ++--
 2 files changed, 11 insertions(+), 12 deletions(-)

diff --git a/compiler/rustc_middle/src/mir/interpret/allocation.rs b/compiler/rustc_middle/src/mir/interpret/allocation.rs
index c2645a0914007..75cbb55239c8b 100644
--- a/compiler/rustc_middle/src/mir/interpret/allocation.rs
+++ b/compiler/rustc_middle/src/mir/interpret/allocation.rs
@@ -512,7 +512,7 @@ impl InitMaskCompressed {
 /// Transferring the initialization mask to other allocations.
 impl Allocation {
     /// Creates a run-length encoding of the initialization mask.
-    pub fn compress_uninit_range(&self, src: Pointer, size: Size) -> InitMaskCompressed {
+    pub fn compress_uninit_range(&self, range: AllocRange) -> InitMaskCompressed {
         // Since we are copying `size` bytes from `src` to `dest + i * size` (`for i in 0..repeat`),
         // a naive initialization mask copying algorithm would repeatedly have to read the initialization mask from
         // the source and write it to the destination. Even if we optimized the memory accesses,
@@ -526,13 +526,13 @@ impl Allocation {
         // where each element toggles the state.
 
         let mut ranges = smallvec::SmallVec::<[u64; 1]>::new();
-        let initial = self.init_mask.get(src.offset);
+        let initial = self.init_mask.get(range.start);
         let mut cur_len = 1;
         let mut cur = initial;
 
-        for i in 1..size.bytes() {
+        for i in 1..range.size.bytes() {
             // FIXME: optimize to bitshift the current uninitialized block's bits and read the top bit.
-            if self.init_mask.get(src.offset + Size::from_bytes(i)) == cur {
+            if self.init_mask.get(range.start + Size::from_bytes(i)) == cur {
                 cur_len += 1;
             } else {
                 ranges.push(cur_len);
@@ -550,24 +550,23 @@
     pub fn mark_compressed_init_range(
         &mut self,
         defined: &InitMaskCompressed,
-        dest: Pointer,
-        size: Size,
+        range: AllocRange,
         repeat: u64,
     ) {
         // An optimization where we can just overwrite an entire range of initialization
         // bits if they are going to be uniformly `1` or `0`.
         if defined.ranges.len() <= 1 {
             self.init_mask.set_range_inbounds(
-                dest.offset,
-                dest.offset + size * repeat, // `Size` operations
+                range.start,
+                range.start + range.size * repeat, // `Size` operations
                 defined.initial,
             );
             return;
         }
 
         for mut j in 0..repeat {
-            j *= size.bytes();
-            j += dest.offset.bytes();
+            j *= range.size.bytes();
+            j += range.start.bytes();
             let mut cur = defined.initial;
             for range in &defined.ranges {
                 let old_j = j;
diff --git a/compiler/rustc_mir/src/interpret/memory.rs b/compiler/rustc_mir/src/interpret/memory.rs
index cb929c21850cb..990dbbcd250b2 100644
--- a/compiler/rustc_mir/src/interpret/memory.rs
+++ b/compiler/rustc_mir/src/interpret/memory.rs
@@ -1049,7 +1049,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
             num_copies,
         );
         // Prepare a copy of the initialization mask.
-        let compressed = src_alloc.compress_uninit_range(src, size);
+        let compressed = src_alloc.compress_uninit_range(alloc_range(src.offset, size));
         // This checks relocation edges on the src.
         let src_bytes = src_alloc
             .get_bytes_with_uninit_and_ptr(&tcx, alloc_range(src.offset, size))
@@ -1110,7 +1110,7 @@
         }
 
         // now fill in all the "init" data
-        dest_alloc.mark_compressed_init_range(&compressed, dest, size, num_copies);
+        dest_alloc.mark_compressed_init_range(&compressed, alloc_range(dest.offset, size), num_copies);
         // copy the relocations to the destination
         dest_alloc.mark_relocation_range(relocations);
 

From 6b26640583649b6e688f7c1d82778a555bf66b79 Mon Sep 17 00:00:00 2001
From: Ralf Jung
Date: Mon, 12 Jul 2021 18:45:56 +0200
Subject: [PATCH 2/2] remove unnecessary deallocate_local hack

---
 .../rustc_mir/src/interpret/eval_context.rs |  6 +++---
 compiler/rustc_mir/src/interpret/memory.rs  | 17 +++++------------
 2 files changed, 8 insertions(+), 15 deletions(-)

diff --git a/compiler/rustc_mir/src/interpret/eval_context.rs b/compiler/rustc_mir/src/interpret/eval_context.rs
index 648a7abfdc7b1..8cd459265dfc5 100644
--- a/compiler/rustc_mir/src/interpret/eval_context.rs
+++ b/compiler/rustc_mir/src/interpret/eval_context.rs
@@ -18,8 +18,8 @@ use rustc_span::{Pos, Span};
 use rustc_target::abi::{Align, HasDataLayout, LayoutOf, Size, TargetDataLayout};
 
 use super::{
-    Immediate, MPlaceTy, Machine, MemPlace, MemPlaceMeta, Memory, Operand, Place, PlaceTy,
-    ScalarMaybeUninit, StackPopJump,
+    Immediate, MPlaceTy, Machine, MemPlace, MemPlaceMeta, Memory, MemoryKind, Operand, Place,
+    PlaceTy, ScalarMaybeUninit, StackPopJump,
 };
 use crate::transform::validate::equal_up_to_regions;
 use crate::util::storage::AlwaysLiveLocals;
@@ -900,7 +900,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             // due to the local having ZST type.
             let ptr = ptr.assert_ptr();
             trace!("deallocating local: {:?}", self.memory.dump_alloc(ptr.alloc_id));
-            self.memory.deallocate_local(ptr)?;
+            self.memory.deallocate(ptr, None, MemoryKind::Stack)?;
         };
         Ok(())
     }
diff --git a/compiler/rustc_mir/src/interpret/memory.rs b/compiler/rustc_mir/src/interpret/memory.rs
index 990dbbcd250b2..5f719cc160706 100644
--- a/compiler/rustc_mir/src/interpret/memory.rs
+++ b/compiler/rustc_mir/src/interpret/memory.rs
@@ -276,17 +276,6 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
         Ok(new_ptr)
     }
 
-    /// Deallocate a local, or do nothing if that local has been made into a global.
-    pub fn deallocate_local(&mut self, ptr: Pointer) -> InterpResult<'tcx> {
-        // The allocation might be already removed by global interning.
-        // This can only really happen in the CTFE instance, not in miri.
-        if self.alloc_map.contains_key(&ptr.alloc_id) {
-            self.deallocate(ptr, None, MemoryKind::Stack)
-        } else {
-            Ok(())
-        }
-    }
-
     pub fn deallocate(
         &mut self,
         ptr: Pointer,
@@ -1110,7 +1099,11 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
         }
 
         // now fill in all the "init" data
-        dest_alloc.mark_compressed_init_range(&compressed, alloc_range(dest.offset, size), num_copies);
+        dest_alloc.mark_compressed_init_range(
+            &compressed,
+            alloc_range(dest.offset, size),
+            num_copies,
+        );
         // copy the relocations to the destination
         dest_alloc.mark_relocation_range(relocations);
 
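
A note for readers on the encoding these patches shuffle around: the first patch only changes how the affected byte range is identified (a single AllocRange instead of a Pointer plus a Size); the run-length scheme itself is untouched. Below is a minimal standalone sketch of that scheme in plain Rust. It is an illustration under stated assumptions, not the rustc implementation: Compressed, compress, and apply are made-up names standing in for InitMaskCompressed, compress_uninit_range, and mark_compressed_init_range; the mask is a plain non-empty &[bool] rather than the bit-packed InitMask; and the destination is assumed to hold exactly `repeat` copies.

// Run-length encoding of an initialization mask: `initial` is the value of
// the first run; each entry in `ranges` is a run length, and the value
// toggles from one run to the next.
struct Compressed {
    initial: bool,
    ranges: Vec<u64>,
}

// Analogue of `compress_uninit_range`: RLE-encode a non-empty boolean mask.
fn compress(mask: &[bool]) -> Compressed {
    let initial = mask[0];
    let mut ranges = Vec::new();
    let mut cur = initial;
    let mut cur_len = 1u64;
    for &bit in &mask[1..] {
        if bit == cur {
            cur_len += 1;
        } else {
            ranges.push(cur_len);
            cur_len = 1;
            cur = !cur;
        }
    }
    ranges.push(cur_len);
    Compressed { initial, ranges }
}

// Analogue of `mark_compressed_init_range`: replay the encoding `repeat`
// times over `dest`, which is assumed to be exactly `repeat` copies long.
fn apply(dest: &mut [bool], c: &Compressed, repeat: u64) {
    // Fast path from the patch: a single run means the destination is
    // uniformly `initial`, so it can be written in one bulk operation.
    if c.ranges.len() <= 1 {
        dest.fill(c.initial);
        return;
    }
    let size: u64 = c.ranges.iter().sum();
    for i in 0..repeat {
        let mut j = i * size; // start of the i-th copy, like `j *= size.bytes()`
        let mut cur = c.initial;
        for &len in &c.ranges {
            for k in j..j + len {
                dest[k as usize] = cur;
            }
            j += len;
            cur = !cur; // each range toggles the state
        }
    }
}

fn main() {
    let mask = [true, true, false, false, false, true];
    let c = compress(&mask);
    assert!(c.initial);
    assert_eq!(c.ranges, vec![2, 3, 1]);

    // Copy the mask three times, as the interpreter does for repeated copies.
    let mut dest = vec![false; mask.len() * 3];
    apply(&mut dest, &c, 3);
    assert_eq!(&dest[6..12], &mask[..]);
}

The `ranges.len() <= 1` fast path mirrors the one kept in mark_compressed_init_range: a single run means the whole destination range is uniformly initialized or uninitialized, so one bulk write replaces replaying the runs copy by copy.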