Ignore ZST offsets when deciding whether to use Scalar/ScalarPair layout #73453

Merged · 6 commits · Sep 25, 2020
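The change in a nutshell, as a standalone sketch (illustrative only; the function name is hypothetical, but the type mirrors the codegen tests below): a struct or tuple whose single non-ZST field fills it completely can now use the Scalar ABI even when a zero-sized field sits at a nonzero offset.

// Before this PR, the `()` at offset 16 made layout fall back to an
// aggregate, so the tuple was passed in memory; with this change it is
// passed and returned as a plain i128.
type ScalarZstLast = (u128, ());

#[no_mangle]
pub fn pass_through(x: ScalarZstLast) -> ScalarZstLast { x }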
36 changes: 27 additions & 9 deletions compiler/rustc_codegen_ssa/src/mir/place.rs
@@ -93,15 +93,33 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
         let effective_field_align = self.align.restrict_for_offset(offset);

         let mut simple = || {
-            // Unions and newtypes only use an offset of 0.
-            let llval = if offset.bytes() == 0 {
-                self.llval
-            } else if let Abi::ScalarPair(ref a, ref b) = self.layout.abi {
-                // Offsets have to match either first or second field.
-                assert_eq!(offset, a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi));
-                bx.struct_gep(self.llval, 1)
-            } else {
-                bx.struct_gep(self.llval, bx.cx().backend_field_index(self.layout, ix))
+            let llval = match self.layout.abi {
+                _ if offset.bytes() == 0 => {
+                    // Unions and newtypes only use an offset of 0.
+                    // Also handles the first field of Scalar, ScalarPair, and Vector layouts.
+                    self.llval
+                }
+                Abi::ScalarPair(ref a, ref b)
+                    if offset == a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi) =>
+                {
+                    // Offset matches second field.
+                    bx.struct_gep(self.llval, 1)
+                }
+                Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } if field.is_zst() => {
+                    // ZST fields are not included in Scalar, ScalarPair, and Vector layouts, so manually offset the pointer.
+                    let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
+                    bx.gep(byte_ptr, &[bx.const_usize(offset.bytes())])
+                }
+                Abi::Scalar(_) | Abi::ScalarPair(..) => {
+                    // All fields of Scalar and ScalarPair layouts must have been handled by this point.
+                    // Vector layouts have additional fields for each element of the vector, so don't panic in that case.
+                    bug!(
+                        "offset of non-ZST field `{:?}` does not match layout `{:#?}`",
+                        field,
+                        self.layout
+                    );
+                }
+                _ => bx.struct_gep(self.llval, bx.cx().backend_field_index(self.layout, ix)),
             };
             PlaceRef {
                 // HACK(eddyb): have to bitcast pointers until LLVM removes pointee types.
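One note on the second match arm above: a ScalarPair's second field always sits at the first scalar's size rounded up to the second scalar's alignment, so that equality check uniquely identifies it. A minimal sketch of the rule (helper name hypothetical):

// Mirrors `a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi)`:
// round the first scalar's size up to the next multiple of the second
// scalar's alignment.
fn second_field_offset(a_size: u64, b_align: u64) -> u64 {
    (a_size + b_align - 1) / b_align * b_align
}

// e.g. a pair of (u8, u32) places the second field at
// second_field_offset(1, 4) == 4; any other nonzero offset must belong to a
// ZST field and is handled by the manual byte-offset arm.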
112 changes: 47 additions & 65 deletions compiler/rustc_middle/src/ty/layout.rs
@@ -390,78 +390,60 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {

         // Unpack newtype ABIs and find scalar pairs.
         if sized && size.bytes() > 0 {
-            // All other fields must be ZSTs, and we need them to all start at 0.
-            let mut zst_offsets = offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
-            if zst_offsets.all(|(_, o)| o.bytes() == 0) {
-                let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
-
-                match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
-                    // We have exactly one non-ZST field.
-                    (Some((i, field)), None, None) => {
-                        // Field fills the struct and it has a scalar or scalar pair ABI.
-                        if offsets[i].bytes() == 0
-                            && align.abi == field.align.abi
-                            && size == field.size
-                        {
-                            match field.abi {
-                                // For plain scalars, or vectors of them, we can't unpack
-                                // newtypes for `#[repr(C)]`, as that affects C ABIs.
-                                Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
-                                    abi = field.abi.clone();
-                                }
-                                // But scalar pairs are Rust-specific and get
-                                // treated as aggregates by C ABIs anyway.
-                                Abi::ScalarPair(..) => {
-                                    abi = field.abi.clone();
-                                }
-                                _ => {}
-                            }
-                        }
-                    }
-
-                    // Two non-ZST fields, and they're both scalars.
-                    (
-                        Some((
-                            i,
-                            &TyAndLayout {
-                                layout: &Layout { abi: Abi::Scalar(ref a), .. }, ..
-                            },
-                        )),
-                        Some((
-                            j,
-                            &TyAndLayout {
-                                layout: &Layout { abi: Abi::Scalar(ref b), .. }, ..
-                            },
-                        )),
-                        None,
-                    ) => {
-                        // Order by the memory placement, not source order.
-                        let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
-                            ((i, a), (j, b))
-                        } else {
-                            ((j, b), (i, a))
-                        };
-                        let pair = self.scalar_pair(a.clone(), b.clone());
-                        let pair_offsets = match pair.fields {
-                            FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
-                                assert_eq!(memory_index, &[0, 1]);
-                                offsets
-                            }
-                            _ => bug!(),
-                        };
-                        if offsets[i] == pair_offsets[0]
-                            && offsets[j] == pair_offsets[1]
-                            && align == pair.align
-                            && size == pair.size
-                        {
-                            // We can use `ScalarPair` only when it matches our
-                            // already computed layout (including `#[repr(C)]`).
-                            abi = pair.abi;
-                        }
-                    }
-
-                    _ => {}
-                }
-            }
+            // All other fields must be ZSTs.
+            let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
+
+            match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
+                // We have exactly one non-ZST field.
+                (Some((i, field)), None, None) => {
+                    // Field fills the struct and it has a scalar or scalar pair ABI.
+                    if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
+                    {
+                        match field.abi {
+                            // For plain scalars, or vectors of them, we can't unpack
+                            // newtypes for `#[repr(C)]`, as that affects C ABIs.
+                            Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
+                                abi = field.abi.clone();
+                            }
+                            // But scalar pairs are Rust-specific and get
+                            // treated as aggregates by C ABIs anyway.
+                            Abi::ScalarPair(..) => {
+                                abi = field.abi.clone();
+                            }
+                            _ => {}
+                        }
+                    }
+                }
+
+                // Two non-ZST fields, and they're both scalars.
+                (
+                    Some((i, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref a), .. }, .. })),
+                    Some((j, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref b), .. }, .. })),
+                    None,
+                ) => {
+                    // Order by the memory placement, not source order.
+                    let ((i, a), (j, b)) =
+                        if offsets[i] < offsets[j] { ((i, a), (j, b)) } else { ((j, b), (i, a)) };
+                    let pair = self.scalar_pair(a.clone(), b.clone());
+                    let pair_offsets = match pair.fields {
+                        FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
+                            assert_eq!(memory_index, &[0, 1]);
+                            offsets
+                        }
+                        _ => bug!(),
+                    };
+                    if offsets[i] == pair_offsets[0]
+                        && offsets[j] == pair_offsets[1]
+                        && align == pair.align
+                        && size == pair.size
+                    {
+                        // We can use `ScalarPair` only when it matches our
+                        // already computed layout (including `#[repr(C)]`).
+                        abi = pair.abi;
+                    }
+                }

+                _ => {}
+            }
         }
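To see why the old `zst_offsets` guard was too strict, here is a quick standalone check (assuming current rustc's layout choices) that a trailing ZST lands at a nonzero offset, which the old `o.bytes() == 0` test rejected:

fn main() {
    let v: (u128, ()) = (0, ());
    let base = &v as *const _ as usize;
    let zst = &v.1 as *const _ as usize;
    // The `()` field is placed at the end of the tuple, i.e. at offset 16.
    assert_eq!(zst - base, 16);
}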
36 changes: 36 additions & 0 deletions src/test/codegen/tuple-layout-opt.rs
@@ -0,0 +1,36 @@
// ignore-emscripten
// compile-flags: -C no-prepopulate-passes

// Test that tuples get optimized layout, in particular with a ZST in the last field (#63244)

#![crate_type="lib"]

type ScalarZstLast = (u128, ());
// CHECK: define i128 @test_ScalarZstLast(i128 %_1)
#[no_mangle]
pub fn test_ScalarZstLast(_: ScalarZstLast) -> ScalarZstLast { loop {} }

type ScalarZstFirst = ((), u128);
// CHECK: define i128 @test_ScalarZstFirst(i128 %_1)
#[no_mangle]
pub fn test_ScalarZstFirst(_: ScalarZstFirst) -> ScalarZstFirst { loop {} }

type ScalarPairZstLast = (u8, u128, ());
// CHECK: define { i128, i8 } @test_ScalarPairZstLast(i128 %_1.0, i8 %_1.1)
#[no_mangle]
pub fn test_ScalarPairZstLast(_: ScalarPairZstLast) -> ScalarPairZstLast { loop {} }

type ScalarPairZstFirst = ((), u8, u128);
// CHECK: define { i8, i128 } @test_ScalarPairZstFirst(i8 %_1.0, i128 %_1.1)
#[no_mangle]
pub fn test_ScalarPairZstFirst(_: ScalarPairZstFirst) -> ScalarPairZstFirst { loop {} }

type ScalarPairLotsOfZsts = ((), u8, (), u128, ());
// CHECK: define { i128, i8 } @test_ScalarPairLotsOfZsts(i128 %_1.0, i8 %_1.1)
#[no_mangle]
pub fn test_ScalarPairLotsOfZsts(_: ScalarPairLotsOfZsts) -> ScalarPairLotsOfZsts { loop {} }

type ScalarPairLottaNesting = (((), ((), u8, (), u128, ())), ());
// CHECK: define { i128, i8 } @test_ScalarPairLottaNesting(i128 %_1.0, i8 %_1.1)
#[no_mangle]
pub fn test_ScalarPairLottaNesting(_: ScalarPairLottaNesting) -> ScalarPairLottaNesting { loop {} }
43 changes: 43 additions & 0 deletions src/test/codegen/zst-offset.rs
@@ -0,0 +1,43 @@
// compile-flags: -C no-prepopulate-passes

#![crate_type = "lib"]
#![feature(repr_simd)]

// Hack to get the correct size for the length part in slices
// CHECK: @helper([[USIZE:i[0-9]+]] %_1)
#[no_mangle]
pub fn helper(_: usize) {
}

// Check that we correctly generate a GEP for a ZST that is not included in Scalar layout
// CHECK-LABEL: @scalar_layout
#[no_mangle]
pub fn scalar_layout(s: &(u64, ())) {
// CHECK: [[X0:%[0-9]+]] = bitcast i64* %s to i8*
// CHECK-NEXT: [[X1:%[0-9]+]] = getelementptr i8, i8* [[X0]], [[USIZE]] 8
let x = &s.1;
&x; // keep variable in an alloca
}

// Check that we correctly generate a GEP for a ZST that is not included in ScalarPair layout
// CHECK-LABEL: @scalarpair_layout
#[no_mangle]
pub fn scalarpair_layout(s: &(u64, u32, ())) {
// CHECK: [[X0:%[0-9]+]] = bitcast { i64, i32 }* %s to i8*
// CHECK-NEXT: [[X1:%[0-9]+]] = getelementptr i8, i8* [[X0]], [[USIZE]] 12
let x = &s.2;
&x; // keep variable in an alloca
}

#[repr(simd)]
pub struct U64x4(u64, u64, u64, u64);

// Check that we correctly generate a GEP for a ZST that is not included in Vector layout
// CHECK-LABEL: @vector_layout
#[no_mangle]
pub fn vector_layout(s: &(U64x4, ())) {
// CHECK: [[X0:%[0-9]+]] = bitcast <4 x i64>* %s to i8*
// CHECK-NEXT: [[X1:%[0-9]+]] = getelementptr i8, i8* [[X0]], [[USIZE]] 32
let x = &s.1;
&x; // keep variable in an alloca
}
26 changes: 26 additions & 0 deletions src/test/ui/dynamically-sized-types/dst-tuple-no-reorder.rs
@@ -0,0 +1,26 @@
// run-pass

#![feature(unsized_tuple_coercion)]

// Ensure that unsizable fields that might be accessed don't get reordered

fn nonzero_size() {
let sized: (u8, [u32; 2]) = (123, [456, 789]);
let unsize: &(u8, [u32]) = &sized;
assert_eq!(unsize.0, 123);
assert_eq!(unsize.1.len(), 2);
assert_eq!(unsize.1[0], 456);
assert_eq!(unsize.1[1], 789);
}

fn zst() {
let sized: (u8, [u32; 0]) = (123, []);
let unsize: &(u8, [u32]) = &sized;
assert_eq!(unsize.0, 123);
assert_eq!(unsize.1.len(), 0);
}

pub fn main() {
nonzero_size();
zst();
}
22 changes: 22 additions & 0 deletions src/test/ui/dynamically-sized-types/dst-tuple-zst-offsets.rs
@@ -0,0 +1,22 @@
// run-pass

#![feature(unsized_tuple_coercion)]

// Check that we do not change the offsets of ZST fields when unsizing

fn scalar_layout() {
let sized: &(u8, [(); 13]) = &(123, [(); 13]);
let unsize: &(u8, [()]) = sized;
assert_eq!(sized.1.as_ptr(), unsize.1.as_ptr());
}

fn scalarpair_layout() {
let sized: &(u8, u16, [(); 13]) = &(123, 456, [(); 13]);
let unsize: &(u8, u16, [()]) = sized;
assert_eq!(sized.2.as_ptr(), unsize.2.as_ptr());
}

pub fn main() {
scalar_layout();
scalarpair_layout();
}
27 changes: 27 additions & 0 deletions src/test/ui/mir/mir_const_prop_tuple_field_reorder.rs
@@ -0,0 +1,27 @@
// compile-flags: -Z mir-opt-level=2
// build-pass
#![crate_type="lib"]

// This used to ICE: const-prop did not account for field reordering of scalar pairs,
// and would generate a tuple like `(0x1337, VariantBar): (FooEnum, isize)`,
// causing assertion failures in codegen when trying to read 0x1337 at the wrong type.

pub enum FooEnum {
VariantBar,
VariantBaz,
VariantBuz,
}

pub fn wrong_index() -> isize {
let (_, b) = id((FooEnum::VariantBar, 0x1337));
b
}

pub fn wrong_index_two() -> isize {
let (_, (_, b)) = id(((), (FooEnum::VariantBar, 0x1338)));
b
}

fn id<T>(x: T) -> T {
x
}
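For context on the reordering mentioned in the comment at the top of this test: on a 64-bit target, rustc's optimized layout puts the higher-aligned `isize` first in memory, so a pair built in source order carries the discriminant and the integer in swapped slots. A standalone illustration (assumes current rustc on a 64-bit target; not part of the PR):

use std::mem::size_of;

pub enum FooEnum { VariantBar, VariantBaz, VariantBuz }

fn main() {
    let v: (FooEnum, isize) = (FooEnum::VariantBar, 0x1337);
    let base = &v as *const _ as usize;
    // Memory order is (isize, FooEnum), the reverse of source order.
    assert_eq!(&v.1 as *const _ as usize - base, 0);
    assert_eq!(&v.0 as *const _ as usize - base, size_of::<isize>());
}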