Rollup merge of rust-lang#103456 - scottmcm:fix-unchecked-shifts, r=scottmcm

`unchecked_{shl|shr}` should use `u32` as the RHS

The other shift methods, such as https://doc.rust-lang.org/nightly/std/primitive.u64.html#method.checked_shr and https://doc.rust-lang.org/nightly/std/primitive.i16.html#method.wrapping_shl, use `u32` for the shift amount. That's consistent with other APIs such as `count_ones`, which also always use `u32` for a bit count, regardless of the size of the type.

This PR changes `unchecked_shl` and `unchecked_shr` to also use `u32` for the shift amount (rather than `Self`).

cc rust-lang#85122, the `unchecked_math` tracking issue
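
For comparison, a minimal nightly-only sketch (not part of the commit) of a call site under the new signature — the shift amount is a `u32` regardless of the width of the value being shifted, just like `checked_shl` and `wrapping_shl`:

```rust
#![feature(unchecked_math)]

fn main() {
    let x: u64 = 0b1011;
    let shift: u32 = 3; // u32 shift amount, even though `x` is u64
    // SAFETY: 3 < u64::BITS, so the shift stays in bounds.
    let y = unsafe { x.unchecked_shl(shift) };
    assert_eq!(y, 0b1011_000);
}
```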
Manishearth committed Nov 18, 2022
2 parents c4fb7f9 + 9d4b1f9 commit 1bb8339
Showing 5 changed files with 88 additions and 12 deletions.
1 change: 1 addition & 0 deletions library/core/src/lib.rs
@@ -133,6 +133,7 @@
 #![feature(const_pin)]
 #![feature(const_ptr_sub_ptr)]
 #![feature(const_replace)]
+#![feature(const_result_drop)]
 #![feature(const_ptr_as_ref)]
 #![feature(const_ptr_is_null)]
 #![feature(const_ptr_read)]
16 changes: 10 additions & 6 deletions library/core/src/num/int_macros.rs
@@ -761,10 +761,11 @@ macro_rules! int_impl {
 #[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
 #[inline(always)]
 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
-pub const unsafe fn unchecked_shl(self, rhs: Self) -> Self {
+pub const unsafe fn unchecked_shl(self, rhs: u32) -> Self {
     // SAFETY: the caller must uphold the safety contract for
     // `unchecked_shl`.
-    unsafe { intrinsics::unchecked_shl(self, rhs) }
+    // Any legal shift amount is losslessly representable in the self type.
+    unsafe { intrinsics::unchecked_shl(self, rhs.try_into().ok().unwrap_unchecked()) }
 }

 /// Checked shift right. Computes `self >> rhs`, returning `None` if `rhs` is
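
The intrinsic still takes the shift amount at the self type, so the `u32` has to be converted back before the call. Below is a standalone sketch of the `try_into().ok().unwrap_unchecked()` idiom used above, with a hypothetical helper name of our own; in optimized builds the conversion lowers to a truncation plus a range assumption rather than a branch, which is what the codegen test added below checks for:

```rust
use std::convert::TryInto;

/// Convert a shift amount to the RHS type a 16-bit shift intrinsic would expect.
///
/// # Safety
/// `rhs` must be less than 16, as required by `unchecked_shl`/`unchecked_shr`.
unsafe fn shift_rhs_to_i16(rhs: u32) -> i16 {
    // SAFETY: the caller guarantees `rhs < 16`, so the conversion cannot fail;
    // `unwrap_unchecked` turns that guarantee into an optimizer hint instead of
    // a runtime check.
    unsafe { rhs.try_into().ok().unwrap_unchecked() }
}

fn main() {
    // SAFETY: 5 < 16.
    assert_eq!(unsafe { shift_rhs_to_i16(5) }, 5i16);
}
```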
@@ -808,10 +809,11 @@ macro_rules! int_impl {
 #[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
 #[inline(always)]
 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
-pub const unsafe fn unchecked_shr(self, rhs: Self) -> Self {
+pub const unsafe fn unchecked_shr(self, rhs: u32) -> Self {
     // SAFETY: the caller must uphold the safety contract for
     // `unchecked_shr`.
-    unsafe { intrinsics::unchecked_shr(self, rhs) }
+    // Any legal shift amount is losslessly representable in the self type.
+    unsafe { intrinsics::unchecked_shr(self, rhs.try_into().ok().unwrap_unchecked()) }
 }

 /// Checked absolute value. Computes `self.abs()`, returning `None` if
@@ -1358,11 +1360,12 @@ macro_rules! int_impl {
 #[must_use = "this returns the result of the operation, \
               without modifying the original"]
 #[inline(always)]
+#[rustc_allow_const_fn_unstable(const_inherent_unchecked_arith)]
 pub const fn wrapping_shl(self, rhs: u32) -> Self {
     // SAFETY: the masking by the bitsize of the type ensures that we do not shift
     // out of bounds
     unsafe {
-        intrinsics::unchecked_shl(self, (rhs & ($BITS - 1)) as $SelfT)
+        self.unchecked_shl(rhs & ($BITS - 1))
     }
 }

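The reasoning in the SAFETY comment can be exercised in ordinary stable Rust: for a power-of-two bit width, masking with `$BITS - 1` keeps any `u32` shift amount strictly below the width, which is exactly the precondition `unchecked_shl` requires. A small sketch using the stable `<<` operator in place of the unstable method:

```rust
// Mirrors `wrapping_shl` for u16: the mask guarantees the shift is in bounds.
fn wrapping_shl_u16(x: u16, rhs: u32) -> u16 {
    let masked = rhs & (u16::BITS - 1); // always in 0..16
    debug_assert!(masked < u16::BITS);
    x << masked
}

fn main() {
    assert_eq!(wrapping_shl_u16(1, 3), 8);
    assert_eq!(wrapping_shl_u16(1, 16), 1); // 16 & 15 == 0: full-width shifts wrap around
    assert_eq!(wrapping_shl_u16(1, 19), 8); // 19 & 15 == 3
    assert_eq!(1u16.wrapping_shl(19), 8);   // matches the real method
}
```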
@@ -1387,11 +1390,12 @@ macro_rules! int_impl {
 #[must_use = "this returns the result of the operation, \
               without modifying the original"]
 #[inline(always)]
+#[rustc_allow_const_fn_unstable(const_inherent_unchecked_arith)]
 pub const fn wrapping_shr(self, rhs: u32) -> Self {
     // SAFETY: the masking by the bitsize of the type ensures that we do not shift
     // out of bounds
     unsafe {
-        intrinsics::unchecked_shr(self, (rhs & ($BITS - 1)) as $SelfT)
+        self.unchecked_shr(rhs & ($BITS - 1))
     }
 }

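A quick check, in stable Rust, that the documented `wrapping_shr` behavior for signed types is unchanged by the rewrite — an arithmetic shift by the amount taken modulo the bit width:

```rust
fn main() {
    assert_eq!((-8i16).wrapping_shr(2), -2);  // arithmetic shift keeps the sign
    assert_eq!((-8i16).wrapping_shr(16), -8); // 16 & 15 == 0: no shift
    assert_eq!((-8i16).wrapping_shr(18), -2); // 18 & 15 == 2
}
```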
1 change: 1 addition & 0 deletions library/core/src/num/mod.rs
@@ -3,6 +3,7 @@
 #![stable(feature = "rust1", since = "1.0.0")]

 use crate::ascii;
+use crate::convert::TryInto;
 use crate::error::Error;
 use crate::intrinsics;
 use crate::mem;
16 changes: 10 additions & 6 deletions library/core/src/num/uint_macros.rs
@@ -908,10 +908,11 @@ macro_rules! uint_impl {
 #[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
 #[inline(always)]
 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
-pub const unsafe fn unchecked_shl(self, rhs: Self) -> Self {
+pub const unsafe fn unchecked_shl(self, rhs: u32) -> Self {
     // SAFETY: the caller must uphold the safety contract for
     // `unchecked_shl`.
-    unsafe { intrinsics::unchecked_shl(self, rhs) }
+    // Any legal shift amount is losslessly representable in the self type.
+    unsafe { intrinsics::unchecked_shl(self, rhs.try_into().ok().unwrap_unchecked()) }
 }

 /// Checked shift right. Computes `self >> rhs`, returning `None`
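
The new comment's claim that any legal shift amount is losslessly representable holds in both directions: widening the `u32` for 64- and 128-bit types can never fail, and narrowing it for 8- and 16-bit types can only fail for amounts that already violate the `unchecked_shl` contract. A short stable-Rust illustration:

```rust
use std::convert::TryFrom;

fn main() {
    let shift: u32 = 7;
    assert_eq!(u64::try_from(shift).unwrap(), 7u64); // widening never fails
    assert_eq!(u8::try_from(shift).unwrap(), 7u8);   // in-range narrowing succeeds
    assert!(u8::try_from(300u32).is_err());          // only out-of-contract amounts fail
}
```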
@@ -955,10 +956,11 @@ macro_rules! uint_impl {
 #[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
 #[inline(always)]
 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
-pub const unsafe fn unchecked_shr(self, rhs: Self) -> Self {
+pub const unsafe fn unchecked_shr(self, rhs: u32) -> Self {
     // SAFETY: the caller must uphold the safety contract for
     // `unchecked_shr`.
-    unsafe { intrinsics::unchecked_shr(self, rhs) }
+    // Any legal shift amount is losslessly representable in the self type.
+    unsafe { intrinsics::unchecked_shr(self, rhs.try_into().ok().unwrap_unchecked()) }
 }

 /// Checked exponentiation. Computes `self.pow(exp)`, returning `None` if
@@ -1374,11 +1376,12 @@ macro_rules! uint_impl {
 #[must_use = "this returns the result of the operation, \
               without modifying the original"]
 #[inline(always)]
+#[rustc_allow_const_fn_unstable(const_inherent_unchecked_arith)]
 pub const fn wrapping_shl(self, rhs: u32) -> Self {
     // SAFETY: the masking by the bitsize of the type ensures that we do not shift
     // out of bounds
     unsafe {
-        intrinsics::unchecked_shl(self, (rhs & ($BITS - 1)) as $SelfT)
+        self.unchecked_shl(rhs & ($BITS - 1))
     }
 }

@@ -1406,11 +1409,12 @@ macro_rules! uint_impl {
 #[must_use = "this returns the result of the operation, \
               without modifying the original"]
 #[inline(always)]
+#[rustc_allow_const_fn_unstable(const_inherent_unchecked_arith)]
 pub const fn wrapping_shr(self, rhs: u32) -> Self {
     // SAFETY: the masking by the bitsize of the type ensures that we do not shift
     // out of bounds
     unsafe {
-        intrinsics::unchecked_shr(self, (rhs & ($BITS - 1)) as $SelfT)
+        self.unchecked_shr(rhs & ($BITS - 1))
     }
 }

66 changes: 66 additions & 0 deletions src/test/codegen/unchecked_shifts.rs
@@ -0,0 +1,66 @@
// compile-flags: -O
// min-llvm-version: 15.0 (LLVM 13 in CI does this differently from submodule LLVM)
// ignore-debug (because unchecked is checked in debug)

#![crate_type = "lib"]
#![feature(unchecked_math)]

// CHECK-LABEL: @unchecked_shl_unsigned_same
#[no_mangle]
pub unsafe fn unchecked_shl_unsigned_same(a: u32, b: u32) -> u32 {
// CHECK-NOT: and i32
// CHECK: shl i32 %a, %b
// CHECK-NOT: and i32
a.unchecked_shl(b)
}

// CHECK-LABEL: @unchecked_shl_unsigned_smaller
#[no_mangle]
pub unsafe fn unchecked_shl_unsigned_smaller(a: u16, b: u32) -> u16 {
// This uses -DAG to avoid failing on irrelevant reorderings,
// like emitting the truncation earlier.

// CHECK-DAG: %[[INRANGE:.+]] = icmp ult i32 %b, 65536
// CHECK-DAG: tail call void @llvm.assume(i1 %[[INRANGE]])
// CHECK-DAG: %[[TRUNC:.+]] = trunc i32 %b to i16
// CHECK-DAG: shl i16 %a, %[[TRUNC]]
a.unchecked_shl(b)
}

// CHECK-LABEL: @unchecked_shl_unsigned_bigger
#[no_mangle]
pub unsafe fn unchecked_shl_unsigned_bigger(a: u64, b: u32) -> u64 {
// CHECK: %[[EXT:.+]] = zext i32 %b to i64
// CHECK: shl i64 %a, %[[EXT]]
a.unchecked_shl(b)
}

// CHECK-LABEL: @unchecked_shr_signed_same
#[no_mangle]
pub unsafe fn unchecked_shr_signed_same(a: i32, b: u32) -> i32 {
// CHECK-NOT: and i32
// CHECK: ashr i32 %a, %b
// CHECK-NOT: and i32
a.unchecked_shr(b)
}

// CHECK-LABEL: @unchecked_shr_signed_smaller
#[no_mangle]
pub unsafe fn unchecked_shr_signed_smaller(a: i16, b: u32) -> i16 {
// This uses -DAG to avoid failing on irrelevant reorderings,
// like emitting the truncation earlier.

// CHECK-DAG: %[[INRANGE:.+]] = icmp ult i32 %b, 32768
// CHECK-DAG: tail call void @llvm.assume(i1 %[[INRANGE]])
// CHECK-DAG: %[[TRUNC:.+]] = trunc i32 %b to i16
// CHECK-DAG: ashr i16 %a, %[[TRUNC]]
a.unchecked_shr(b)
}

// CHECK-LABEL: @unchecked_shr_signed_bigger
#[no_mangle]
pub unsafe fn unchecked_shr_signed_bigger(a: i64, b: u32) -> i64 {
// CHECK: %[[EXT:.+]] = zext i32 %b to i64
// CHECK: ashr i64 %a, %[[EXT]]
a.unchecked_shr(b)
}
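
In the `*_smaller` cases, the `llvm.assume` call is the visible footprint of `unwrap_unchecked`: the failed-conversion branch is unreachable by contract, so the backend may assume the shift amount fits in the narrower type. A rough Rust-level model of that shape (ours, not the library's actual lowering):

```rust
use std::convert::TryFrom;
use std::hint::unreachable_unchecked;

/// # Safety
/// `b` must be less than 16.
pub unsafe fn model_unchecked_shl_u16(a: u16, b: u32) -> u16 {
    let rhs = match u16::try_from(b) {
        Ok(v) => v,
        // SAFETY: the caller promises `b < 16`, so this arm can never run;
        // telling the optimizer so is what surfaces as `llvm.assume`.
        Err(_) => unsafe { unreachable_unchecked() },
    };
    a << rhs
}

fn main() {
    // SAFETY: 3 < 16.
    assert_eq!(unsafe { model_unchecked_shl_u16(0b1, 3) }, 0b1000);
}
```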
