Auto merge of #124905 - reitermarkus:u32-from-char-opt, r=scottmcm
Allow optimizing `u32::from::<char>`.

Extracted from #124307.

This allows optimizing out the panicking branch in the `escape_unicode` function; see https://rust.godbolt.org/z/61YhKrhvP.
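
To illustrate the effect (a hypothetical example, not code from this commit): once the cast carries the `char` range assumption, LLVM can prove that the table index below never exceeds 21 and drop the bounds-check panic.

```rust
// Sketch of the pattern this enables; `bit_table` is an illustrative
// name, not a function from the commit.
pub fn bit_table(c: char) -> [bool; 22] {
    // The cast now emits the equivalent of `llvm.assume(c as u32 <= 0x10FFFF)`.
    let c = u32::from(c);
    let mut array = [false; 22];
    // c <= 0x10FFFF means leading_zeros() >= 11, so index <= 21.
    let index = 32 - c.leading_zeros();
    array[index as usize] = true; // bounds check can be optimized out
    array
}
```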
bors committed Jul 27, 2024
2 parents ad3c5a3 + 2f2eb22 commit 3942254
Showing 2 changed files with 114 additions and 57 deletions.
136 changes: 79 additions & 57 deletions compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -8,7 +8,6 @@ use crate::traits::*;
 use crate::MemFlags;
 
 use rustc_middle::mir;
-use rustc_middle::ty::cast::{CastTy, IntTy};
 use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, TyAndLayout};
 use rustc_middle::ty::{self, adjustment::PointerCoercion, Instance, Ty, TyCtxt};
 use rustc_middle::{bug, span_bug};
@@ -238,21 +237,21 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 }
             }
             OperandValue::Immediate(imm) => {
-                let OperandValueKind::Immediate(in_scalar) = operand_kind else {
+                let OperandValueKind::Immediate(from_scalar) = operand_kind else {
                     bug!("Found {operand_kind:?} for operand {operand:?}");
                 };
-                if let OperandValueKind::Immediate(out_scalar) = cast_kind
-                    && in_scalar.size(self.cx) == out_scalar.size(self.cx)
+                if let OperandValueKind::Immediate(to_scalar) = cast_kind
+                    && from_scalar.size(self.cx) == to_scalar.size(self.cx)
                 {
-                    let operand_bty = bx.backend_type(operand.layout);
-                    let cast_bty = bx.backend_type(cast);
+                    let from_backend_ty = bx.backend_type(operand.layout);
+                    let to_backend_ty = bx.backend_type(cast);
                     Some(OperandValue::Immediate(self.transmute_immediate(
                         bx,
                         imm,
-                        in_scalar,
-                        operand_bty,
-                        out_scalar,
-                        cast_bty,
+                        from_scalar,
+                        from_backend_ty,
+                        to_scalar,
+                        to_backend_ty,
                     )))
                 } else {
                     None
@@ -281,6 +280,58 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         }
     }
 
+    /// Cast one of the immediates from an [`OperandValue::Immediate`]
+    /// or an [`OperandValue::Pair`] to an immediate of the target type.
+    ///
+    /// Returns `None` if the cast is not possible.
+    fn cast_immediate(
+        &self,
+        bx: &mut Bx,
+        mut imm: Bx::Value,
+        from_scalar: abi::Scalar,
+        from_backend_ty: Bx::Type,
+        to_scalar: abi::Scalar,
+        to_backend_ty: Bx::Type,
+    ) -> Option<Bx::Value> {
+        use abi::Primitive::*;
+
+        // When scalars are passed by value, there's no metadata recording their
+        // valid ranges. For example, `char`s are passed as just `i32`, with no
+        // way for LLVM to know that they're 0x10FFFF at most. Thus we assume
+        // the range of the input value too, not just the output range.
+        self.assume_scalar_range(bx, imm, from_scalar, from_backend_ty);
+
+        imm = match (from_scalar.primitive(), to_scalar.primitive()) {
+            (Int(_, is_signed), Int(..)) => bx.intcast(imm, to_backend_ty, is_signed),
+            (Float(_), Float(_)) => {
+                let srcsz = bx.cx().float_width(from_backend_ty);
+                let dstsz = bx.cx().float_width(to_backend_ty);
+                if dstsz > srcsz {
+                    bx.fpext(imm, to_backend_ty)
+                } else if srcsz > dstsz {
+                    bx.fptrunc(imm, to_backend_ty)
+                } else {
+                    imm
+                }
+            }
+            (Int(_, is_signed), Float(_)) => {
+                if is_signed {
+                    bx.sitofp(imm, to_backend_ty)
+                } else {
+                    bx.uitofp(imm, to_backend_ty)
+                }
+            }
+            (Pointer(..), Pointer(..)) => bx.pointercast(imm, to_backend_ty),
+            (Int(_, is_signed), Pointer(..)) => {
+                let usize_imm = bx.intcast(imm, bx.cx().type_isize(), is_signed);
+                bx.inttoptr(usize_imm, to_backend_ty)
+            }
+            (Float(_), Int(_, is_signed)) => bx.cast_float_to_int(is_signed, imm, to_backend_ty),
+            _ => return None,
+        };
+        Some(imm)
+    }
+
     /// Transmutes one of the immediates from an [`OperandValue::Immediate`]
     /// or an [`OperandValue::Pair`] to an immediate of the target type.
     ///
@@ -487,62 +538,33 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     | mir::CastKind::IntToFloat
                     | mir::CastKind::PtrToPtr
                     | mir::CastKind::FnPtrToPtr
-
                     // Since int2ptr can have arbitrary integer types as input (so we have to do
                     // sign extension and all that), it is currently best handled in the same code
                     // path as the other integer-to-X casts.
                     | mir::CastKind::PointerWithExposedProvenance => {
+                        let imm = operand.immediate();
+                        let operand_kind = self.value_kind(operand.layout);
+                        let OperandValueKind::Immediate(from_scalar) = operand_kind else {
+                            bug!("Found {operand_kind:?} for operand {operand:?}");
+                        };
+                        let from_backend_ty = bx.cx().immediate_backend_type(operand.layout);
+
                         assert!(bx.cx().is_backend_immediate(cast));
-                        let ll_t_out = bx.cx().immediate_backend_type(cast);
+                        let to_backend_ty = bx.cx().immediate_backend_type(cast);
                         if operand.layout.abi.is_uninhabited() {
-                            let val = OperandValue::Immediate(bx.cx().const_poison(ll_t_out));
+                            let val = OperandValue::Immediate(bx.cx().const_poison(to_backend_ty));
                             return OperandRef { val, layout: cast };
                         }
-                        let r_t_in =
-                            CastTy::from_ty(operand.layout.ty).expect("bad input type for cast");
-                        let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast");
-                        let ll_t_in = bx.cx().immediate_backend_type(operand.layout);
-                        let llval = operand.immediate();
-
-                        let newval = match (r_t_in, r_t_out) {
-                            (CastTy::Int(i), CastTy::Int(_)) => {
-                                bx.intcast(llval, ll_t_out, i.is_signed())
-                            }
-                            (CastTy::Float, CastTy::Float) => {
-                                let srcsz = bx.cx().float_width(ll_t_in);
-                                let dstsz = bx.cx().float_width(ll_t_out);
-                                if dstsz > srcsz {
-                                    bx.fpext(llval, ll_t_out)
-                                } else if srcsz > dstsz {
-                                    bx.fptrunc(llval, ll_t_out)
-                                } else {
-                                    llval
-                                }
-                            }
-                            (CastTy::Int(i), CastTy::Float) => {
-                                if i.is_signed() {
-                                    bx.sitofp(llval, ll_t_out)
-                                } else {
-                                    bx.uitofp(llval, ll_t_out)
-                                }
-                            }
-                            (CastTy::Ptr(_) | CastTy::FnPtr, CastTy::Ptr(_)) => {
-                                bx.pointercast(llval, ll_t_out)
-                            }
-                            (CastTy::Int(i), CastTy::Ptr(_)) => {
-                                let usize_llval =
-                                    bx.intcast(llval, bx.cx().type_isize(), i.is_signed());
-                                bx.inttoptr(usize_llval, ll_t_out)
-                            }
-                            (CastTy::Float, CastTy::Int(IntTy::I)) => {
-                                bx.cast_float_to_int(true, llval, ll_t_out)
-                            }
-                            (CastTy::Float, CastTy::Int(_)) => {
-                                bx.cast_float_to_int(false, llval, ll_t_out)
-                            }
-                            _ => bug!("unsupported cast: {:?} to {:?}", operand.layout.ty, cast.ty),
+                        let cast_kind = self.value_kind(cast);
+                        let OperandValueKind::Immediate(to_scalar) = cast_kind else {
+                            bug!("Found {cast_kind:?} for operand {cast:?}");
                         };
-                        OperandValue::Immediate(newval)
+
+                        self.cast_immediate(bx, imm, from_scalar, from_backend_ty, to_scalar, to_backend_ty)
+                            .map(OperandValue::Immediate)
+                            .unwrap_or_else(|| {
+                                bug!("Unsupported cast of {operand:?} to {cast:?}");
+                            })
                     }
                     mir::CastKind::Transmute => {
                         self.codegen_transmute_operand(bx, operand, cast).unwrap_or_else(|| {
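In source-level terms, the assumption that `assume_scalar_range` now contributes on the `char` → `u32` path can be modeled roughly as follows. This is an illustrative sketch, not the backend's actual API; rustc emits the `llvm.assume` directly during codegen.

```rust
// Rough model of the new behavior for `char as u32` (illustrative only).
fn char_to_u32_model(c: char) -> u32 {
    let raw = c as u32;
    // Codegen now asserts the valid range of the *source* scalar, so
    // later LLVM passes know `raw` is at most char::MAX (0x10FFFF).
    if raw > char::MAX as u32 {
        // SAFETY: every `char` is a scalar value in 0..=0x10FFFF.
        unsafe { core::hint::unreachable_unchecked() }
    }
    raw
}
```
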
35 changes: 35 additions & 0 deletions tests/codegen/cast-optimized.rs
@@ -0,0 +1,35 @@
+//@ compile-flags: -O -Z merge-functions=disabled
+#![crate_type = "lib"]
+
+// This tests that LLVM can optimize based on the niches in the source or
+// destination types for casts.
+
+// CHECK-LABEL: @u32_index
+#[no_mangle]
+pub fn u32_index(c: u32) -> [bool; 22] {
+    let mut array = [false; 22];
+
+    let index = 32 - c.leading_zeros();
+
+    // CHECK: call core::panicking::panic
+    array[index as usize] = true;
+
+    array
+}
+
+// CHECK-LABEL: @char_as_u32_index
+#[no_mangle]
+pub fn char_as_u32_index(c: char) -> [bool; 22] {
+    // CHECK: %[[B:.+]] = icmp ult i32 %c, 1114112
+    // CHECK: call void @llvm.assume(i1 %[[B]])
+    let c = c as u32;
+
+    let mut array = [false; 22];
+
+    let index = 32 - c.leading_zeros();
+
+    // CHECK-NOT: call core::panicking::panic
+    array[index as usize] = true;
+
+    array
+}
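
As a sanity check of the arithmetic the test relies on (ordinary Rust, separate from the FileCheck test above): for any `c <= 0x10FFFF`, `leading_zeros()` is at least 11, so the computed index never exceeds 21 and the `[bool; 22]` access is always in bounds.

```rust
// Standalone verification of the index range used in char_as_u32_index.
fn main() {
    let max = char::MAX as u32; // 0x10FFFF needs 21 bits
    assert_eq!(max.leading_zeros(), 11);
    assert_eq!(32 - max.leading_zeros(), 21); // largest possible index
    assert_eq!(32 - 0u32.leading_zeros(), 0); // smallest possible index
}
```

In a rust-lang/rust checkout, a codegen test like this would typically be run with `./x test tests/codegen/cast-optimized.rs`.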
