
Unrolled build for rust-lang#123775
Rollup merge of rust-lang#123775 - scottmcm:place-val, r=cjgillot

Make `PlaceRef` and `OperandValue::Ref` share a common `PlaceValue` type

Both `PlaceRef` and `OperandValue::Ref` need the same triple: the backend pointer immediate, the optional backend metadata for DSTs, and the actual alignment of the place (which can differ from the ABI alignment).

This PR introduces a new `PlaceValue` type for those three values, leaving [`PlaceRef`](https://doc.rust-lang.org/nightly/nightly-rustc/rustc_codegen_ssa/mir/place/struct.PlaceRef.html) with the `TyAndLayout` and a `PlaceValue`, just like how [`OperandRef`](https://doc.rust-lang.org/nightly/nightly-rustc/rustc_codegen_ssa/mir/operand/struct.OperandRef.html) is a `TyAndLayout` and an `OperandValue`.
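
For orientation, the new type and the reworked `PlaceRef` look roughly like this, a sketch reconstructed from the diff below (the authoritative definitions live in `rustc_codegen_ssa::mir::place`, and `Align`/`TyAndLayout` are the usual rustc types assumed in scope):

```rust
/// The backend representation of a place, without its layout.
pub struct PlaceValue<V> {
    /// A pointer to the contents of the place.
    pub llval: V,
    /// The extra data (length, vtable) for a DST, or `None` if sized.
    pub llextra: Option<V>,
    /// The alignment we know for this place, which can be less than
    /// the ABI alignment of its type.
    pub align: Align,
}

/// `PlaceRef` is now a `PlaceValue` plus layout, mirroring how
/// `OperandRef` is an `OperandValue` plus layout.
pub struct PlaceRef<'tcx, V> {
    pub val: PlaceValue<V>,
    pub layout: TyAndLayout<'tcx>,
}
```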

This means that various places that use `Ref`s as places can simply pass the `PlaceValue` along, as in this excerpt from the diff:
```diff
        match operand.val {
-            OperandValue::Ref(ptr, meta, align) => {
-                debug_assert_eq!(meta, None);
+            OperandValue::Ref(source_place_val) => {
+                debug_assert_eq!(source_place_val.llextra, None);
                debug_assert!(matches!(operand_kind, OperandValueKind::Ref));
-                let fake_place = PlaceRef::new_sized_aligned(ptr, cast, align);
+                let fake_place = PlaceRef { val: source_place_val, layout: cast };
                Some(bx.load_operand(fake_place).val)
            }
```
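
Sized places, where the metadata is always `None`, get a small constructor instead of an explicit triple. A minimal sketch of `PlaceValue::new_sized`, inferred from its call sites in this diff (e.g. `PlaceValue::new_sized(val, self.layout.align.abi)`), not copied from the source:

```rust
impl<V> PlaceValue<V> {
    /// Constructor for the common case of a thin pointer to a sized place.
    pub fn new_sized(llval: V, align: Align) -> PlaceValue<V> {
        PlaceValue { llval, llextra: None, align }
    }
}
```

For DSTs, the callers below build the struct literally with `llextra: Some(...)`, as in the `PassMode::Indirect { meta_attrs: Some(_), .. }` arms.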

There's more refactoring that I'd like to do after this, but I wanted to stop the PR here, where it's hopefully easy (albeit probably not quick) to review, since I tried to keep every change clear line by line. (Most changes are just adding `.val` to reach a field.)

You can also review it commit-at-a-time if you'd like. Each commit passed tidy and the codegen tests on my machine (though I didn't run the cg_gcc ones).
rust-timer committed Apr 12, 2024
2 parents 46961d2 + d0ae768 commit 0be91ab
Showing 14 changed files with 239 additions and 169 deletions.
compiler/rustc_codegen_gcc/src/builder.rs (13 additions & 12 deletions)

```diff
@@ -974,7 +974,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         &mut self,
         place: PlaceRef<'tcx, RValue<'gcc>>,
     ) -> OperandRef<'tcx, RValue<'gcc>> {
-        assert_eq!(place.llextra.is_some(), place.layout.is_unsized());
+        assert_eq!(place.val.llextra.is_some(), place.layout.is_unsized());
 
         if place.layout.is_zst() {
             return OperandRef::zero_sized(place.layout);
@@ -999,10 +999,11 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
             }
         }
 
-        let val = if let Some(llextra) = place.llextra {
-            OperandValue::Ref(place.llval, Some(llextra), place.align)
+        let val = if let Some(_) = place.val.llextra {
+            // FIXME: Merge with the `else` below?
+            OperandValue::Ref(place.val)
         } else if place.layout.is_gcc_immediate() {
-            let load = self.load(place.layout.gcc_type(self), place.llval, place.align);
+            let load = self.load(place.layout.gcc_type(self), place.val.llval, place.val.align);
             if let abi::Abi::Scalar(ref scalar) = place.layout.abi {
                 scalar_load_metadata(self, load, scalar);
             }
@@ -1012,9 +1013,9 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
 
             let mut load = |i, scalar: &abi::Scalar, align| {
                 let llptr = if i == 0 {
-                    place.llval
+                    place.val.llval
                 } else {
-                    self.inbounds_ptradd(place.llval, self.const_usize(b_offset.bytes()))
+                    self.inbounds_ptradd(place.val.llval, self.const_usize(b_offset.bytes()))
                 };
                 let llty = place.layout.scalar_pair_element_gcc_type(self, i);
                 let load = self.load(llty, llptr, align);
@@ -1027,11 +1028,11 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
             };
 
             OperandValue::Pair(
-                load(0, a, place.align),
-                load(1, b, place.align.restrict_for_offset(b_offset)),
+                load(0, a, place.val.align),
+                load(1, b, place.val.align.restrict_for_offset(b_offset)),
             )
         } else {
-            OperandValue::Ref(place.llval, None, place.align)
+            OperandValue::Ref(place.val)
        };
 
         OperandRef { val, layout: place.layout }
@@ -1045,8 +1046,8 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     ) {
         let zero = self.const_usize(0);
         let count = self.const_usize(count);
-        let start = dest.project_index(self, zero).llval;
-        let end = dest.project_index(self, count).llval;
+        let start = dest.project_index(self, zero).val.llval;
+        let end = dest.project_index(self, count).val.llval;
 
         let header_bb = self.append_sibling_block("repeat_loop_header");
         let body_bb = self.append_sibling_block("repeat_loop_body");
@@ -1064,7 +1065,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         self.cond_br(keep_going, body_bb, next_bb);
 
         self.switch_to_block(body_bb);
-        let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
+        let align = dest.val.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
         cg_elem.val.store(self, PlaceRef::new_sized_aligned(current_val, cg_elem.layout, align));
 
         let next = self.inbounds_gep(
```
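Condensed, the dispatch in `load_operand` above now reads as follows (simplified pseudocode for illustration; `load_scalar` and `load_pair_field` are stand-in names, not functions in the actual source):

```rust
// Simplified view of the dispatch above; only the shape matters.
let val = if place.val.llextra.is_some() {
    // Unsized: pointer, metadata, and alignment travel together now.
    OperandValue::Ref(place.val)
} else if place.layout.is_gcc_immediate() {
    OperandValue::Immediate(load_scalar(place.val)) // hypothetical helper
} else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
    OperandValue::Pair(load_pair_field(0), load_pair_field(1)) // hypothetical helper
} else {
    // Sized but not immediate: still passed around by reference.
    OperandValue::Ref(place.val)
};
```

The point of the refactor shows in the first and last arms: before, each had to unpack and repack the `(llval, llextra, align)` triple by hand.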
compiler/rustc_codegen_gcc/src/intrinsic/mod.rs (13 additions & 8 deletions)

```diff
@@ -11,7 +11,7 @@ use rustc_codegen_ssa::base::wants_msvc_seh;
 use rustc_codegen_ssa::common::IntPredicate;
 use rustc_codegen_ssa::errors::InvalidMonomorphization;
 use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
-use rustc_codegen_ssa::mir::place::PlaceRef;
+use rustc_codegen_ssa::mir::place::{PlaceRef, PlaceValue};
 use rustc_codegen_ssa::traits::{
     ArgAbiMethods, BuilderMethods, ConstMethods, IntrinsicCallMethods,
 };
@@ -354,7 +354,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
 
                 let block = self.llbb();
                 let extended_asm = block.add_extended_asm(None, "");
-                extended_asm.add_input_operand(None, "r", result.llval);
+                extended_asm.add_input_operand(None, "r", result.val.llval);
                 extended_asm.add_clobber("memory");
                 extended_asm.set_volatile_flag(true);
 
@@ -388,8 +388,8 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
         if !fn_abi.ret.is_ignore() {
             if let PassMode::Cast { cast: ty, .. } = &fn_abi.ret.mode {
                 let ptr_llty = self.type_ptr_to(ty.gcc_type(self));
-                let ptr = self.pointercast(result.llval, ptr_llty);
-                self.store(llval, ptr, result.align);
+                let ptr = self.pointercast(result.val.llval, ptr_llty);
+                self.store(llval, ptr, result.val.align);
             } else {
                 OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
                     .val
@@ -502,7 +502,7 @@ impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
             return;
         }
         if self.is_sized_indirect() {
-            OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst)
+            OperandValue::Ref(PlaceValue::new_sized(val, self.layout.align.abi)).store(bx, dst)
         } else if self.is_unsized_indirect() {
             bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
         } else if let PassMode::Cast { ref cast, .. } = self.mode {
@@ -511,7 +511,7 @@ impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
             let can_store_through_cast_ptr = false;
             if can_store_through_cast_ptr {
                 let cast_ptr_llty = bx.type_ptr_to(cast.gcc_type(bx));
-                let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty);
+                let cast_dst = bx.pointercast(dst.val.llval, cast_ptr_llty);
                 bx.store(val, cast_dst, self.layout.align.abi);
             } else {
                 // The actual return type is a struct, but the ABI
@@ -539,7 +539,7 @@ impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
 
                 // ... and then memcpy it to the intended destination.
                 bx.memcpy(
-                    dst.llval,
+                    dst.val.llval,
                     self.layout.align.abi,
                     llscratch,
                     scratch_align,
@@ -571,7 +571,12 @@ impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
                 OperandValue::Pair(next(), next()).store(bx, dst);
             }
             PassMode::Indirect { meta_attrs: Some(_), .. } => {
-                OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
+                let place_val = PlaceValue {
+                    llval: next(),
+                    llextra: Some(next()),
+                    align: self.layout.align.abi,
+                };
+                OperandValue::Ref(place_val).store(bx, dst);
             }
             PassMode::Direct(_)
             | PassMode::Indirect { meta_attrs: None, .. }
```
compiler/rustc_codegen_gcc/src/intrinsic/simd.rs (1 addition & 1 deletion)

```diff
@@ -82,7 +82,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
             let place = PlaceRef::alloca(bx, args[0].layout);
             args[0].val.store(bx, place);
             let int_ty = bx.type_ix(expected_bytes * 8);
-            let ptr = bx.pointercast(place.llval, bx.cx.type_ptr_to(int_ty));
+            let ptr = bx.pointercast(place.val.llval, bx.cx.type_ptr_to(int_ty));
             bx.load(int_ty, ptr, Align::ONE)
         }
         _ => return_error!(InvalidMonomorphization::InvalidBitmask {
```
compiler/rustc_codegen_llvm/src/abi.rs (9 additions & 4 deletions)

```diff
@@ -7,7 +7,7 @@ use crate::type_of::LayoutLlvmExt;
 use crate::value::Value;
 
 use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
-use rustc_codegen_ssa::mir::place::PlaceRef;
+use rustc_codegen_ssa::mir::place::{PlaceRef, PlaceValue};
 use rustc_codegen_ssa::traits::*;
 use rustc_codegen_ssa::MemFlags;
 use rustc_middle::bug;
@@ -207,7 +207,7 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
             // Sized indirect arguments
             PassMode::Indirect { attrs, meta_attrs: None, on_stack: _ } => {
                 let align = attrs.pointee_align.unwrap_or(self.layout.align.abi);
-                OperandValue::Ref(val, None, align).store(bx, dst);
+                OperandValue::Ref(PlaceValue::new_sized(val, align)).store(bx, dst);
             }
             // Unsized indirect arguments
             PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => {
@@ -233,7 +233,7 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
                 bx.store(val, llscratch, scratch_align);
                 // ... and then memcpy it to the intended destination.
                 bx.memcpy(
-                    dst.llval,
+                    dst.val.llval,
                     self.layout.align.abi,
                     llscratch,
                     scratch_align,
@@ -265,7 +265,12 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
                 OperandValue::Pair(next(), next()).store(bx, dst);
             }
             PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => {
-                OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
+                let place_val = PlaceValue {
+                    llval: next(),
+                    llextra: Some(next()),
+                    align: self.layout.align.abi,
+                };
+                OperandValue::Ref(place_val).store(bx, dst);
             }
             PassMode::Direct(_)
             | PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ }
```
compiler/rustc_codegen_llvm/src/builder.rs (11 additions & 10 deletions)

```diff
@@ -535,7 +535,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                 panic!("unsized locals must not be `extern` types");
             }
         }
-        assert_eq!(place.llextra.is_some(), place.layout.is_unsized());
+        assert_eq!(place.val.llextra.is_some(), place.layout.is_unsized());
 
         if place.layout.is_zst() {
             return OperandRef::zero_sized(place.layout);
@@ -579,13 +579,14 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             }
         }
 
-        let val = if let Some(llextra) = place.llextra {
-            OperandValue::Ref(place.llval, Some(llextra), place.align)
+        let val = if let Some(_) = place.val.llextra {
+            // FIXME: Merge with the `else` below?
+            OperandValue::Ref(place.val)
         } else if place.layout.is_llvm_immediate() {
             let mut const_llval = None;
             let llty = place.layout.llvm_type(self);
             unsafe {
-                if let Some(global) = llvm::LLVMIsAGlobalVariable(place.llval) {
+                if let Some(global) = llvm::LLVMIsAGlobalVariable(place.val.llval) {
                     if llvm::LLVMIsGlobalConstant(global) == llvm::True {
                         if let Some(init) = llvm::LLVMGetInitializer(global) {
                             if self.val_ty(init) == llty {
@@ -596,7 +597,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                 }
             }
             let llval = const_llval.unwrap_or_else(|| {
-                let load = self.load(llty, place.llval, place.align);
+                let load = self.load(llty, place.val.llval, place.val.align);
                 if let abi::Abi::Scalar(scalar) = place.layout.abi {
                     scalar_load_metadata(self, load, scalar, place.layout, Size::ZERO);
                 }
@@ -608,9 +609,9 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
 
             let mut load = |i, scalar: abi::Scalar, layout, align, offset| {
                 let llptr = if i == 0 {
-                    place.llval
+                    place.val.llval
                 } else {
-                    self.inbounds_ptradd(place.llval, self.const_usize(b_offset.bytes()))
+                    self.inbounds_ptradd(place.val.llval, self.const_usize(b_offset.bytes()))
                 };
                 let llty = place.layout.scalar_pair_element_llvm_type(self, i, false);
                 let load = self.load(llty, llptr, align);
@@ -619,11 +620,11 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             };
 
             OperandValue::Pair(
-                load(0, a, place.layout, place.align, Size::ZERO),
-                load(1, b, place.layout, place.align.restrict_for_offset(b_offset), b_offset),
+                load(0, a, place.layout, place.val.align, Size::ZERO),
+                load(1, b, place.layout, place.val.align.restrict_for_offset(b_offset), b_offset),
             )
         } else {
-            OperandValue::Ref(place.llval, None, place.align)
+            OperandValue::Ref(place.val)
         };
 
         OperandRef { val, layout: place.layout }
```
compiler/rustc_codegen_llvm/src/intrinsic.rs (4 additions & 4 deletions)

```diff
@@ -264,7 +264,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                     llvm::LLVMSetAlignment(load, align);
                 }
                 if !result.layout.is_zst() {
-                    self.store(load, result.llval, result.align);
+                    self.store_to_place(load, result.val);
                 }
                 return Ok(());
             }
@@ -428,7 +428,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
 
             sym::black_box => {
                 args[0].val.store(self, result);
-                let result_val_span = [result.llval];
+                let result_val_span = [result.val.llval];
                 // We need to "use" the argument in some way LLVM can't introspect, and on
                 // targets that support it we can typically leverage inline assembly to do
                 // this. LLVM's interpretation of inline assembly is that it's, well, a black
@@ -482,7 +482,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
 
         if !fn_abi.ret.is_ignore() {
             if let PassMode::Cast { .. } = &fn_abi.ret.mode {
-                self.store(llval, result.llval, result.align);
+                self.store(llval, result.val.llval, result.val.align);
             } else {
                 OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
                     .val
@@ -1065,7 +1065,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             let place = PlaceRef::alloca(bx, args[0].layout);
             args[0].val.store(bx, place);
             let int_ty = bx.type_ix(expected_bytes * 8);
-            bx.load(int_ty, place.llval, Align::ONE)
+            bx.load(int_ty, place.val.llval, Align::ONE)
         }
         _ => return_error!(InvalidMonomorphization::InvalidBitmask {
             span,
```
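The `self.store_to_place(load, result.val)` call above replaces a manual `self.store(load, result.llval, result.align)`. Presumably the new builder helper just unpacks the triple; a sketch of what it plausibly does, inferred from the call site rather than quoted from the trait definition:

```rust
fn store_to_place(&mut self, val: Self::Value, place: PlaceValue<Self::Value>) -> Self::Value {
    // Storing a whole immediate needs a thin pointer; DSTs take other paths.
    assert_eq!(place.llextra, None);
    self.store(val, place.llval, place.align)
}
```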
(The remaining changed files are not shown here.)
