diff --git a/library/core/src/lib.rs b/library/core/src/lib.rs
index 1e78033256e96..bbd20899a18e1 100644
--- a/library/core/src/lib.rs
+++ b/library/core/src/lib.rs
@@ -133,6 +133,7 @@
 #![feature(const_pin)]
 #![feature(const_ptr_sub_ptr)]
 #![feature(const_replace)]
+#![feature(const_result_drop)]
 #![feature(const_ptr_as_ref)]
 #![feature(const_ptr_is_null)]
 #![feature(const_ptr_read)]
diff --git a/library/core/src/num/int_macros.rs b/library/core/src/num/int_macros.rs
index e64eb1cf7aed8..e1ab7ac5ff045 100644
--- a/library/core/src/num/int_macros.rs
+++ b/library/core/src/num/int_macros.rs
@@ -761,10 +761,11 @@ macro_rules! int_impl {
         #[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
         #[inline(always)]
         #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
-        pub const unsafe fn unchecked_shl(self, rhs: Self) -> Self {
+        pub const unsafe fn unchecked_shl(self, rhs: u32) -> Self {
             // SAFETY: the caller must uphold the safety contract for
             // `unchecked_shl`.
-            unsafe { intrinsics::unchecked_shl(self, rhs) }
+            // Any legal shift amount is losslessly representable in the self type.
+            unsafe { intrinsics::unchecked_shl(self, rhs.try_into().ok().unwrap_unchecked()) }
         }

         /// Checked shift right. Computes `self >> rhs`, returning `None` if `rhs` is
@@ -808,10 +809,11 @@ macro_rules! int_impl {
         #[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
         #[inline(always)]
         #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
-        pub const unsafe fn unchecked_shr(self, rhs: Self) -> Self {
+        pub const unsafe fn unchecked_shr(self, rhs: u32) -> Self {
             // SAFETY: the caller must uphold the safety contract for
             // `unchecked_shr`.
-            unsafe { intrinsics::unchecked_shr(self, rhs) }
+            // Any legal shift amount is losslessly representable in the self type.
+            unsafe { intrinsics::unchecked_shr(self, rhs.try_into().ok().unwrap_unchecked()) }
         }

         /// Checked absolute value. Computes `self.abs()`, returning `None` if
@@ -1358,11 +1360,12 @@ macro_rules! int_impl {
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline(always)]
+        #[rustc_allow_const_fn_unstable(const_inherent_unchecked_arith)]
         pub const fn wrapping_shl(self, rhs: u32) -> Self {
             // SAFETY: the masking by the bitsize of the type ensures that we do not shift
             // out of bounds
             unsafe {
-                intrinsics::unchecked_shl(self, (rhs & ($BITS - 1)) as $SelfT)
+                self.unchecked_shl(rhs & ($BITS - 1))
             }
         }

@@ -1387,11 +1390,12 @@ macro_rules! int_impl {
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline(always)]
+        #[rustc_allow_const_fn_unstable(const_inherent_unchecked_arith)]
         pub const fn wrapping_shr(self, rhs: u32) -> Self {
             // SAFETY: the masking by the bitsize of the type ensures that we do not shift
             // out of bounds
             unsafe {
-                intrinsics::unchecked_shr(self, (rhs & ($BITS - 1)) as $SelfT)
+                self.unchecked_shr(rhs & ($BITS - 1))
             }
         }
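As a minimal caller-side sketch of the int_macros.rs change above (not part of the patch; a nightly toolchain is assumed, since the inherent unchecked shifts are gated behind `unchecked_math`): the shift amount is now a `u32` for every signed integer width, matching the `rhs: u32` already taken by the checked and wrapping variants.

#![feature(unchecked_math)]

fn main() {
    let x: i16 = 1;
    let shift: u32 = 3; // shift amount is u32 regardless of the receiver's width

    // SAFETY: 3 < i16::BITS, so the shift amount is in range.
    let shifted = unsafe { x.unchecked_shl(shift) };
    assert_eq!(shifted, 8);

    // The safe wrapping form masks the amount and then forwards to unchecked_shl.
    assert_eq!(x.wrapping_shl(shift), shifted);
    assert_eq!(x.wrapping_shl(shift + i16::BITS), shifted); // 19 & 15 == 3
}
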
diff --git a/library/core/src/num/mod.rs b/library/core/src/num/mod.rs
index 311c5fa5b6834..b2328b001de90 100644
--- a/library/core/src/num/mod.rs
+++ b/library/core/src/num/mod.rs
@@ -3,6 +3,7 @@
 #![stable(feature = "rust1", since = "1.0.0")]

 use crate::ascii;
+use crate::convert::TryInto;
 use crate::error::Error;
 use crate::intrinsics;
 use crate::mem;
diff --git a/library/core/src/num/uint_macros.rs b/library/core/src/num/uint_macros.rs
index 741d7ec6f592d..af74faa90b110 100644
--- a/library/core/src/num/uint_macros.rs
+++ b/library/core/src/num/uint_macros.rs
@@ -908,10 +908,11 @@ macro_rules! uint_impl {
         #[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
         #[inline(always)]
         #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
-        pub const unsafe fn unchecked_shl(self, rhs: Self) -> Self {
+        pub const unsafe fn unchecked_shl(self, rhs: u32) -> Self {
             // SAFETY: the caller must uphold the safety contract for
             // `unchecked_shl`.
-            unsafe { intrinsics::unchecked_shl(self, rhs) }
+            // Any legal shift amount is losslessly representable in the self type.
+            unsafe { intrinsics::unchecked_shl(self, rhs.try_into().ok().unwrap_unchecked()) }
         }

         /// Checked shift right. Computes `self >> rhs`, returning `None`
@@ -955,10 +956,11 @@ macro_rules! uint_impl {
         #[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
         #[inline(always)]
         #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
-        pub const unsafe fn unchecked_shr(self, rhs: Self) -> Self {
+        pub const unsafe fn unchecked_shr(self, rhs: u32) -> Self {
             // SAFETY: the caller must uphold the safety contract for
             // `unchecked_shr`.
-            unsafe { intrinsics::unchecked_shr(self, rhs) }
+            // Any legal shift amount is losslessly representable in the self type.
+            unsafe { intrinsics::unchecked_shr(self, rhs.try_into().ok().unwrap_unchecked()) }
         }

         /// Checked exponentiation. Computes `self.pow(exp)`, returning `None` if
@@ -1374,11 +1376,12 @@ macro_rules! uint_impl {
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline(always)]
+        #[rustc_allow_const_fn_unstable(const_inherent_unchecked_arith)]
         pub const fn wrapping_shl(self, rhs: u32) -> Self {
             // SAFETY: the masking by the bitsize of the type ensures that we do not shift
             // out of bounds
             unsafe {
-                intrinsics::unchecked_shl(self, (rhs & ($BITS - 1)) as $SelfT)
+                self.unchecked_shl(rhs & ($BITS - 1))
             }
         }

@@ -1406,11 +1409,12 @@ macro_rules! uint_impl {
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline(always)]
+        #[rustc_allow_const_fn_unstable(const_inherent_unchecked_arith)]
         pub const fn wrapping_shr(self, rhs: u32) -> Self {
             // SAFETY: the masking by the bitsize of the type ensures that we do not shift
             // out of bounds
             unsafe {
-                intrinsics::unchecked_shr(self, (rhs & ($BITS - 1)) as $SelfT)
+                self.unchecked_shr(rhs & ($BITS - 1))
             }
         }
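The unsigned methods above follow the same pattern as the signed ones; the only new moving part is the `rhs.try_into().ok().unwrap_unchecked()` conversion, which narrows (or widens) the `u32` amount into the receiver's own type with no runtime check, because any in-range shift amount necessarily fits. Calling `Result::ok` inside these const fns is presumably why the `const_result_drop` gate is newly enabled in lib.rs. A standalone sketch of that conversion idiom (illustrative names and types only, not the library code itself):

use std::convert::TryInto;

// Sketch of the conversion pattern used in the new method bodies.
// Safety contract: the caller must guarantee `rhs < u8::BITS`.
unsafe fn shl_u8_sketch(lhs: u8, rhs: u32) -> u8 {
    // Every legal shift amount (0..8) fits in a u8, so the fallible
    // conversion cannot actually fail; unwrap_unchecked encodes that.
    let narrow: u8 = unsafe { rhs.try_into().ok().unwrap_unchecked() };
    lhs << narrow
}

fn main() {
    // SAFETY: 5 < u8::BITS.
    assert_eq!(unsafe { shl_u8_sketch(1, 5) }, 32);
}
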
diff --git a/src/test/codegen/unchecked_shifts.rs b/src/test/codegen/unchecked_shifts.rs
new file mode 100644
index 0000000000000..60d0cb09acaf9
--- /dev/null
+++ b/src/test/codegen/unchecked_shifts.rs
@@ -0,0 +1,66 @@
+// compile-flags: -O
+// min-llvm-version: 15.0 (LLVM 13 in CI does this differently from submodule LLVM)
+// ignore-debug (because unchecked is checked in debug)
+
+#![crate_type = "lib"]
+#![feature(unchecked_math)]
+
+// CHECK-LABEL: @unchecked_shl_unsigned_same
+#[no_mangle]
+pub unsafe fn unchecked_shl_unsigned_same(a: u32, b: u32) -> u32 {
+    // CHECK-NOT: and i32
+    // CHECK: shl i32 %a, %b
+    // CHECK-NOT: and i32
+    a.unchecked_shl(b)
+}
+
+// CHECK-LABEL: @unchecked_shl_unsigned_smaller
+#[no_mangle]
+pub unsafe fn unchecked_shl_unsigned_smaller(a: u16, b: u32) -> u16 {
+    // This uses -DAG to avoid failing on irrelevant reorderings,
+    // like emitting the truncation earlier.
+
+    // CHECK-DAG: %[[INRANGE:.+]] = icmp ult i32 %b, 65536
+    // CHECK-DAG: tail call void @llvm.assume(i1 %[[INRANGE]])
+    // CHECK-DAG: %[[TRUNC:.+]] = trunc i32 %b to i16
+    // CHECK-DAG: shl i16 %a, %[[TRUNC]]
+    a.unchecked_shl(b)
+}
+
+// CHECK-LABEL: @unchecked_shl_unsigned_bigger
+#[no_mangle]
+pub unsafe fn unchecked_shl_unsigned_bigger(a: u64, b: u32) -> u64 {
+    // CHECK: %[[EXT:.+]] = zext i32 %b to i64
+    // CHECK: shl i64 %a, %[[EXT]]
+    a.unchecked_shl(b)
+}
+
+// CHECK-LABEL: @unchecked_shr_signed_same
+#[no_mangle]
+pub unsafe fn unchecked_shr_signed_same(a: i32, b: u32) -> i32 {
+    // CHECK-NOT: and i32
+    // CHECK: ashr i32 %a, %b
+    // CHECK-NOT: and i32
+    a.unchecked_shr(b)
+}
+
+// CHECK-LABEL: @unchecked_shr_signed_smaller
+#[no_mangle]
+pub unsafe fn unchecked_shr_signed_smaller(a: i16, b: u32) -> i16 {
+    // This uses -DAG to avoid failing on irrelevant reorderings,
+    // like emitting the truncation earlier.
+
+    // CHECK-DAG: %[[INRANGE:.+]] = icmp ult i32 %b, 32768
+    // CHECK-DAG: tail call void @llvm.assume(i1 %[[INRANGE]])
+    // CHECK-DAG: %[[TRUNC:.+]] = trunc i32 %b to i16
+    // CHECK-DAG: ashr i16 %a, %[[TRUNC]]
+    a.unchecked_shr(b)
+}
+
+// CHECK-LABEL: @unchecked_shr_signed_bigger
+#[no_mangle]
+pub unsafe fn unchecked_shr_signed_bigger(a: i64, b: u32) -> i64 {
+    // CHECK: %[[EXT:.+]] = zext i32 %b to i64
+    // CHECK: ashr i64 %a, %[[EXT]]
+    a.unchecked_shr(b)
+}
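The new codegen test pins down the intended optimized IR shapes: no masking `and` for same-width shifts, an `llvm.assume` plus `trunc` when the receiver is narrower than `u32`, and a `zext` when it is wider. A quick behavioural sketch of the same three cases (not part of the patch; nightly-only because `unchecked_math` is unstable), with every shift amount kept in range as the safety contract requires:

#![feature(unchecked_math)]

fn main() {
    unsafe {
        // Same width: u32 receiver, u32 shift amount.
        assert_eq!(1u32.unchecked_shl(31), 1 << 31);
        // Narrower receiver: u16 << u32 (the assume + trunc case).
        assert_eq!(1u16.unchecked_shl(9), 512);
        // Wider receiver: u64 << u32 (the zext case).
        assert_eq!(1u64.unchecked_shl(40), 1 << 40);
        // Signed right shifts follow the same pattern.
        assert_eq!((-64i16).unchecked_shr(3), -8);
    }
}

When working in-tree, the test file should be runnable on its own through the usual `./x.py test` path filter.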