From 53565b23ac21bc15be1feab9fb316fce7a73e21c Mon Sep 17 00:00:00 2001
From: Maybe Waffle
Date: Fri, 19 Aug 2022 13:20:22 +0400
Subject: [PATCH] Make use of `[wrapping_]byte_{add,sub}`

...replacing `.cast().wrapping_offset().cast()` & similar code.
---
 compiler/rustc_arena/src/lib.rs            |  3 ++-
 library/alloc/src/vec/into_iter.rs         | 13 +++++--------
 library/alloc/src/vec/mod.rs               |  4 ++--
 library/core/src/ptr/const_ptr.rs          |  2 +-
 library/core/src/ptr/mut_ptr.rs            |  2 +-
 library/core/src/slice/iter/macros.rs      |  2 +-
 library/core/tests/atomic.rs               |  2 +-
 library/core/tests/ptr.rs                  |  2 +-
 library/std/src/io/error/repr_bitpacked.rs |  4 ++--
 library/std/src/lib.rs                     |  1 +
 10 files changed, 17 insertions(+), 18 deletions(-)

diff --git a/compiler/rustc_arena/src/lib.rs b/compiler/rustc_arena/src/lib.rs
index d2f8ef8eaae7d..46dbbd83d1905 100644
--- a/compiler/rustc_arena/src/lib.rs
+++ b/compiler/rustc_arena/src/lib.rs
@@ -16,6 +16,7 @@
 #![feature(maybe_uninit_slice)]
 #![feature(min_specialization)]
 #![feature(decl_macro)]
+#![feature(pointer_byte_offsets)]
 #![feature(rustc_attrs)]
 #![cfg_attr(test, feature(test))]
 #![feature(strict_provenance)]
@@ -211,7 +212,7 @@ impl<T> TypedArena<T> {
 
         unsafe {
             if mem::size_of::<T>() == 0 {
-                self.ptr.set((self.ptr.get() as *mut u8).wrapping_offset(1) as *mut T);
+                self.ptr.set(self.ptr.get().wrapping_byte_add(1));
                 let ptr = ptr::NonNull::<T>::dangling().as_ptr();
                 // Don't drop the object. This `write` is equivalent to `forget`.
                 ptr::write(ptr, object);
diff --git a/library/alloc/src/vec/into_iter.rs b/library/alloc/src/vec/into_iter.rs
index 4351548811d2e..ed049194dd068 100644
--- a/library/alloc/src/vec/into_iter.rs
+++ b/library/alloc/src/vec/into_iter.rs
@@ -4,7 +4,6 @@ use crate::alloc::{Allocator, Global};
 use crate::raw_vec::RawVec;
 use core::array;
 use core::fmt;
-use core::intrinsics::arith_offset;
 use core::iter::{
     FusedIterator, InPlaceIterable, SourceIter, TrustedLen, TrustedRandomAccessNoCoerce,
 };
@@ -154,7 +153,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
             // purposefully don't use 'ptr.offset' because for
             // vectors with 0-size elements this would return the
             // same pointer.
-            self.ptr = unsafe { arith_offset(self.ptr as *const i8, 1) as *mut T };
+            self.ptr = self.ptr.wrapping_byte_add(1);
 
             // Make up a value of this ZST.
             Some(unsafe { mem::zeroed() })
@@ -184,7 +183,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
             // SAFETY: due to unchecked casts of unsigned amounts to signed offsets the wraparound
             // effectively results in unsigned pointers representing positions 0..usize::MAX,
             // which is valid for ZSTs.
-            self.ptr = unsafe { arith_offset(self.ptr as *const i8, step_size as isize) as *mut T }
+            self.ptr = self.ptr.wrapping_byte_add(step_size);
         } else {
             // SAFETY: the min() above ensures that step_size is in bounds
             self.ptr = unsafe { self.ptr.add(step_size) };
@@ -217,7 +216,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
                 return Err(unsafe { array::IntoIter::new_unchecked(raw_ary, 0..len) });
             }
 
-            self.ptr = unsafe { arith_offset(self.ptr as *const i8, N as isize) as *mut T };
+            self.ptr = self.ptr.wrapping_byte_add(N);
             // Safety: ditto
             return Ok(unsafe { MaybeUninit::array_assume_init(raw_ary) });
         }
@@ -267,7 +266,7 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
             None
         } else if mem::size_of::<T>() == 0 {
             // See above for why 'ptr.offset' isn't used
-            self.end = unsafe { arith_offset(self.end as *const i8, -1) as *mut T };
+            self.end = self.end.wrapping_byte_sub(1);
 
             // Make up a value of this ZST.
             Some(unsafe { mem::zeroed() })
@@ -283,9 +282,7 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
         let step_size = self.len().min(n);
         if mem::size_of::<T>() == 0 {
             // SAFETY: same as for advance_by()
-            self.end = unsafe {
-                arith_offset(self.end as *const i8, step_size.wrapping_neg() as isize) as *mut T
-            }
+            self.end = self.end.wrapping_byte_sub(step_size);
         } else {
             // SAFETY: same as for advance_by()
             self.end = unsafe { self.end.sub(step_size) };
diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs
index 2c786fd511ead..1f19b9e594549 100644
--- a/library/alloc/src/vec/mod.rs
+++ b/library/alloc/src/vec/mod.rs
@@ -59,7 +59,7 @@ use core::cmp::Ordering;
 use core::convert::TryFrom;
 use core::fmt;
 use core::hash::{Hash, Hasher};
-use core::intrinsics::{arith_offset, assume};
+use core::intrinsics::assume;
 use core::iter;
 #[cfg(not(no_global_oom_handling))]
 use core::iter::FromIterator;
@@ -2678,7 +2678,7 @@ impl<T, A: Allocator> IntoIterator for Vec<T, A> {
             let alloc = ManuallyDrop::new(ptr::read(me.allocator()));
             let begin = me.as_mut_ptr();
             let end = if mem::size_of::<T>() == 0 {
-                arith_offset(begin as *const i8, me.len() as isize) as *const T
+                begin.wrapping_byte_add(me.len())
             } else {
                 begin.add(me.len()) as *const T
             };
diff --git a/library/core/src/ptr/const_ptr.rs b/library/core/src/ptr/const_ptr.rs
index c25b159c533a1..feba3283e468c 100644
--- a/library/core/src/ptr/const_ptr.rs
+++ b/library/core/src/ptr/const_ptr.rs
@@ -249,7 +249,7 @@ impl<T: ?Sized> *const T {
         let offset = dest_addr.wrapping_sub(self_addr);
 
         // This is the canonical desugarring of this operation
-        self.cast::<u8>().wrapping_offset(offset).cast::<T>()
+        self.wrapping_byte_offset(offset)
     }
 
     /// Creates a new pointer by mapping `self`'s address to a new one.
diff --git a/library/core/src/ptr/mut_ptr.rs b/library/core/src/ptr/mut_ptr.rs
index fff06b458c7c1..a70f8747916c3 100644
--- a/library/core/src/ptr/mut_ptr.rs
+++ b/library/core/src/ptr/mut_ptr.rs
@@ -255,7 +255,7 @@ impl<T: ?Sized> *mut T {
         let offset = dest_addr.wrapping_sub(self_addr);
 
         // This is the canonical desugarring of this operation
-        self.cast::<u8>().wrapping_offset(offset).cast::<T>()
+        self.wrapping_byte_offset(offset)
     }
 
     /// Creates a new pointer by mapping `self`'s address to a new one.
diff --git a/library/core/src/slice/iter/macros.rs b/library/core/src/slice/iter/macros.rs
index c05242222dde7..47455760a4bd7 100644
--- a/library/core/src/slice/iter/macros.rs
+++ b/library/core/src/slice/iter/macros.rs
@@ -64,7 +64,7 @@ macro_rules! iterator {
         // backwards by `n`. `n` must not exceed `self.len()`.
         macro_rules! zst_shrink {
             ($self: ident, $n: ident) => {
-                $self.end = ($self.end as * $raw_mut u8).wrapping_offset(-$n) as * $raw_mut T;
+                $self.end = $self.end.wrapping_byte_offset(-$n);
             }
         }
 
diff --git a/library/core/tests/atomic.rs b/library/core/tests/atomic.rs
index 13b12db209a76..94b0310603bf4 100644
--- a/library/core/tests/atomic.rs
+++ b/library/core/tests/atomic.rs
@@ -155,7 +155,7 @@ fn ptr_add_data() {
     assert_eq!(atom.fetch_ptr_sub(1, SeqCst), n.wrapping_add(1));
     assert_eq!(atom.load(SeqCst), n);
 
-    let bytes_from_n = |b| n.cast::<u8>().wrapping_add(b).cast::<i64>();
+    let bytes_from_n = |b| n.wrapping_byte_add(b);
 
     assert_eq!(atom.fetch_byte_add(1, SeqCst), n);
     assert_eq!(atom.load(SeqCst), bytes_from_n(1));
diff --git a/library/core/tests/ptr.rs b/library/core/tests/ptr.rs
index 12861794c2d2c..97a369810056d 100644
--- a/library/core/tests/ptr.rs
+++ b/library/core/tests/ptr.rs
@@ -650,7 +650,7 @@ fn thin_box() {
                     .unwrap_or_else(|| handle_alloc_error(layout))
                     .cast::<DynMetadata<T>>();
                 ptr.as_ptr().write(meta);
-                ptr.cast::<u8>().as_ptr().add(offset).cast::<Value>().write(value);
+                ptr.as_ptr().byte_add(offset).cast::<Value>().write(value);
                 Self { ptr, phantom: PhantomData }
             }
         }
diff --git a/library/std/src/io/error/repr_bitpacked.rs b/library/std/src/io/error/repr_bitpacked.rs
index 292bf4826fd23..781ae03ad45dc 100644
--- a/library/std/src/io/error/repr_bitpacked.rs
+++ b/library/std/src/io/error/repr_bitpacked.rs
@@ -269,10 +269,10 @@ where
         }
         TAG_SIMPLE_MESSAGE => ErrorData::SimpleMessage(&*ptr.cast::<SimpleMessage>().as_ptr()),
         TAG_CUSTOM => {
-            // It would be correct for us to use `ptr::sub` here (see the
+            // It would be correct for us to use `ptr::byte_sub` here (see the
             // comment above the `wrapping_add` call in `new_custom` for why),
             // but it isn't clear that it makes a difference, so we don't.
-            let custom = ptr.as_ptr().cast::<u8>().wrapping_sub(TAG_CUSTOM).cast::<Custom>();
+            let custom = ptr.as_ptr().wrapping_byte_sub(TAG_CUSTOM).cast::<Custom>();
             ErrorData::Custom(make_custom(custom))
         }
         _ => {
diff --git a/library/std/src/lib.rs b/library/std/src/lib.rs
index 71bbf4317e0a5..ba16a5c68ad9e 100644
--- a/library/std/src/lib.rs
+++ b/library/std/src/lib.rs
@@ -296,6 +296,7 @@
 #![feature(panic_can_unwind)]
 #![feature(panic_info_message)]
 #![feature(panic_internals)]
+#![feature(pointer_byte_offsets)]
 #![feature(pointer_is_aligned)]
 #![feature(portable_simd)]
 #![feature(prelude_2024)]
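
A minimal standalone sketch (not part of the patch) of the equivalence these rewrites rely on, assuming a nightly toolchain from around this change with `#![feature(pointer_byte_offsets)]` enabled; the array `xs` and the offset `4` are illustrative only. `wrapping_byte_add` is documented as a convenience for casting to a byte pointer, applying the wrapping offset, and casting back, so both spellings below produce the same pointer.

    #![feature(pointer_byte_offsets)]

    fn main() {
        let xs = [1u32, 2, 3, 4];
        let p: *const u32 = xs.as_ptr();

        // Old spelling: cast to a byte pointer, offset in bytes, cast back.
        let old = p.cast::<u8>().wrapping_add(4).cast::<u32>();
        // New spelling: the same byte offset expressed directly on `*const u32`.
        let new = p.wrapping_byte_add(4);

        assert_eq!(old, new);
        // Four bytes past the start of a `[u32; 4]` is its second element,
        // and the pointer stays inside the array, so reading it is sound.
        assert_eq!(unsafe { *new }, 2);
    }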