From 837eff94a7f8bd755f8059a62f69774a3d117e4b Mon Sep 17 00:00:00 2001 From: Erik Desjardins Date: Tue, 6 Dec 2022 00:07:28 -0500 Subject: [PATCH 1/4] cg_ssa, cg_llvm: remove pointee types --- compiler/rustc_codegen_llvm/src/abi.rs | 15 ++--- compiler/rustc_codegen_llvm/src/allocator.rs | 2 +- compiler/rustc_codegen_llvm/src/back/write.rs | 6 +- compiler/rustc_codegen_llvm/src/builder.rs | 23 ++++--- compiler/rustc_codegen_llvm/src/common.rs | 14 ++-- compiler/rustc_codegen_llvm/src/consts.rs | 12 ++-- compiler/rustc_codegen_llvm/src/context.rs | 41 ++++++------ .../rustc_codegen_llvm/src/debuginfo/gdb.rs | 2 +- compiler/rustc_codegen_llvm/src/intrinsic.rs | 65 +++++++++---------- compiler/rustc_codegen_llvm/src/llvm/ffi.rs | 2 +- compiler/rustc_codegen_llvm/src/type_.rs | 27 ++------ compiler/rustc_codegen_llvm/src/type_of.rs | 22 +++---- compiler/rustc_codegen_llvm/src/va_arg.rs | 19 +++--- compiler/rustc_codegen_ssa/src/base.rs | 12 ++-- compiler/rustc_codegen_ssa/src/meth.rs | 4 +- compiler/rustc_codegen_ssa/src/mir/block.rs | 17 ++--- .../rustc_codegen_ssa/src/mir/intrinsic.rs | 14 ++-- compiler/rustc_codegen_ssa/src/mir/operand.rs | 2 +- compiler/rustc_codegen_ssa/src/mir/place.rs | 19 ++---- .../rustc_codegen_ssa/src/traits/type_.rs | 12 +--- src/test/ui/limits/issue-17913.stderr | 4 ++ 21 files changed, 143 insertions(+), 191 deletions(-) diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs index a6fd2a7de6bd0..15b2e919f25a1 100644 --- a/compiler/rustc_codegen_llvm/src/abi.rs +++ b/compiler/rustc_codegen_llvm/src/abi.rs @@ -226,7 +226,7 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> { // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}. let can_store_through_cast_ptr = false; if can_store_through_cast_ptr { - let cast_ptr_llty = bx.type_ptr_to(cast.llvm_type(bx)); + let cast_ptr_llty = bx.type_ptr(); let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty); bx.store(val, cast_dst, self.layout.align.abi); } else { @@ -346,7 +346,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_llvm_type(cx), PassMode::Cast(cast, _) => cast.llvm_type(cx), PassMode::Indirect { .. 
} => { - llargument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx))); + llargument_tys.push(cx.type_ptr()); cx.type_void() } }; @@ -374,9 +374,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { } cast.llvm_type(cx) } - PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => { - cx.type_ptr_to(arg.memory_ty(cx)) - } + PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => cx.type_ptr(), }; llargument_tys.push(llarg_ty); } @@ -389,12 +387,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { } fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type { - unsafe { - llvm::LLVMPointerType( - self.llvm_type(cx), - cx.data_layout().instruction_address_space.0 as c_uint, - ) - } + cx.type_ptr_in(cx.data_layout().instruction_address_space) } fn llvm_cconv(&self) -> llvm::CallConv { diff --git a/compiler/rustc_codegen_llvm/src/allocator.rs b/compiler/rustc_codegen_llvm/src/allocator.rs index fed56cdd43821..4a77c08a8fed6 100644 --- a/compiler/rustc_codegen_llvm/src/allocator.rs +++ b/compiler/rustc_codegen_llvm/src/allocator.rs @@ -26,7 +26,7 @@ pub(crate) unsafe fn codegen( tws => bug!("Unsupported target word size for int: {}", tws), }; let i8 = llvm::LLVMInt8TypeInContext(llcx); - let i8p = llvm::LLVMPointerType(i8, 0); + let i8p = llvm::LLVMPointerTypeInContext(llcx, 0); let void = llvm::LLVMVoidTypeInContext(llcx); for method in ALLOCATOR_METHODS { diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs index 97d0de47b3a6e..b54273c864dbb 100644 --- a/compiler/rustc_codegen_llvm/src/back/write.rs +++ b/compiler/rustc_codegen_llvm/src/back/write.rs @@ -927,7 +927,7 @@ fn create_msvc_imps( let prefix = if cgcx.target_arch == "x86" { "\x01__imp__" } else { "\x01__imp_" }; unsafe { - let i8p_ty = Type::i8p_llcx(llcx); + let ptr_ty = Type::ptr_llcx(llcx); let globals = base::iter_globals(llmod) .filter(|&val| { llvm::LLVMRustGetLinkage(val) == llvm::Linkage::ExternalLinkage @@ -947,8 +947,8 @@ fn create_msvc_imps( .collect::>(); for (imp_name, val) in globals { - let imp = llvm::LLVMAddGlobal(llmod, i8p_ty, imp_name.as_ptr().cast()); - llvm::LLVMSetInitializer(imp, consts::ptrcast(val, i8p_ty)); + let imp = llvm::LLVMAddGlobal(llmod, ptr_ty, imp_name.as_ptr().cast()); + llvm::LLVMSetInitializer(imp, consts::ptrcast(val, ptr_ty)); llvm::LLVMRustSetLinkage(imp, llvm::Linkage::ExternalLinkage); } } diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs index 77dd15ef4d807..cb733af89c19c 100644 --- a/compiler/rustc_codegen_llvm/src/builder.rs +++ b/compiler/rustc_codegen_llvm/src/builder.rs @@ -641,7 +641,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { flags: MemFlags, ) -> &'ll Value { debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags); - let ptr = self.check_store(val, ptr); + let ptr = self.check_store(ptr); unsafe { let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr); let align = @@ -671,7 +671,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { size: Size, ) { debug!("Store {:?} -> {:?}", val, ptr); - let ptr = self.check_store(val, ptr); + let ptr = self.check_store(ptr); unsafe { let store = llvm::LLVMRustBuildAtomicStore( self.llbuilder, @@ -854,8 +854,8 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memcpy not supported"); let size = self.intcast(size, self.type_isize(), 
false); let is_volatile = flags.contains(MemFlags::VOLATILE); - let dst = self.pointercast(dst, self.type_i8p()); - let src = self.pointercast(src, self.type_i8p()); + let dst = self.pointercast(dst, self.type_ptr()); + let src = self.pointercast(src, self.type_ptr()); unsafe { llvm::LLVMRustBuildMemCpy( self.llbuilder, @@ -881,8 +881,8 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memmove not supported"); let size = self.intcast(size, self.type_isize(), false); let is_volatile = flags.contains(MemFlags::VOLATILE); - let dst = self.pointercast(dst, self.type_i8p()); - let src = self.pointercast(src, self.type_i8p()); + let dst = self.pointercast(dst, self.type_ptr()); + let src = self.pointercast(src, self.type_ptr()); unsafe { llvm::LLVMRustBuildMemMove( self.llbuilder, @@ -905,7 +905,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { flags: MemFlags, ) { let is_volatile = flags.contains(MemFlags::VOLATILE); - let ptr = self.pointercast(ptr, self.type_i8p()); + let ptr = self.pointercast(ptr, self.type_ptr()); unsafe { llvm::LLVMRustBuildMemSet( self.llbuilder, @@ -1130,7 +1130,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { let llfn = unsafe { llvm::LLVMRustGetInstrProfIncrementIntrinsic(self.cx().llmod) }; let llty = self.cx.type_func( - &[self.cx.type_i8p(), self.cx.type_i64(), self.cx.type_i32(), self.cx.type_i32()], + &[self.cx.type_ptr(), self.cx.type_i64(), self.cx.type_i32(), self.cx.type_i32()], self.cx.type_void(), ); let args = &[fn_name, hash, num_counters, index]; @@ -1340,10 +1340,9 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { ret.expect("LLVM does not have support for catchret") } - fn check_store(&mut self, val: &'ll Value, ptr: &'ll Value) -> &'ll Value { + fn check_store(&mut self, ptr: &'ll Value) -> &'ll Value { let dest_ptr_ty = self.cx.val_ty(ptr); - let stored_ty = self.cx.val_ty(val); - let stored_ptr_ty = self.cx.type_ptr_to(stored_ty); + let stored_ptr_ty = self.cx.type_ptr(); assert_eq!(self.cx.type_kind(dest_ptr_ty), TypeKind::Pointer); @@ -1421,7 +1420,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { return; } - let ptr = self.pointercast(ptr, self.cx.type_i8p()); + let ptr = self.pointercast(ptr, self.cx.type_ptr()); self.call_intrinsic(intrinsic, &[self.cx.const_u64(size), ptr]); } diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs index acee9134fb96e..ec054ca61b61a 100644 --- a/compiler/rustc_codegen_llvm/src/common.rs +++ b/compiler/rustc_codegen_llvm/src/common.rs @@ -4,7 +4,6 @@ use crate::consts::{self, const_alloc_to_llvm}; pub use crate::context::CodegenCx; use crate::llvm::{self, BasicBlock, Bool, ConstantInt, False, OperandBundleDef, True}; use crate::type_::Type; -use crate::type_of::LayoutLlvmExt; use crate::value::Value; use rustc_ast::Mutability; @@ -13,7 +12,7 @@ use rustc_codegen_ssa::traits::*; use rustc_hir::def_id::DefId; use rustc_middle::bug; use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, Scalar}; -use rustc_middle::ty::layout::{LayoutOf, TyAndLayout}; +use rustc_middle::ty::layout::TyAndLayout; use rustc_middle::ty::TyCtxt; use rustc_session::cstore::{DllCallingConvention, DllImport, PeImportNameType}; use rustc_target::abi::{self, AddressSpace, HasDataLayout, Pointer, Size}; @@ -203,10 +202,7 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { }) .1; let len = s.len(); - let cs = consts::ptrcast( - 
str_global, - self.type_ptr_to(self.layout_of(self.tcx.types.str_).llvm_type(self)), - ); + let cs = consts::ptrcast(str_global, self.type_ptr()); (cs, self.const_usize(len as u64)) } @@ -279,7 +275,7 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { let llval = unsafe { llvm::LLVMRustConstInBoundsGEP2( self.type_i8(), - self.const_bitcast(base_addr, self.type_i8p_ext(base_addr_space)), + self.const_bitcast(base_addr, self.type_ptr_in(base_addr_space)), &self.const_usize(offset.bytes()), 1, ) @@ -305,7 +301,7 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { ) -> PlaceRef<'tcx, &'ll Value> { let alloc_align = alloc.inner().align; assert_eq!(alloc_align, layout.align.abi); - let llty = self.type_ptr_to(layout.llvm_type(self)); + let llty = self.type_ptr(); let llval = if layout.size == Size::ZERO { let llval = self.const_usize(alloc_align.bytes()); unsafe { llvm::LLVMConstIntToPtr(llval, llty) } @@ -316,7 +312,7 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { let llval = unsafe { llvm::LLVMRustConstInBoundsGEP2( self.type_i8(), - self.const_bitcast(base_addr, self.type_i8p()), + self.const_bitcast(base_addr, self.type_ptr()), &self.const_usize(offset.bytes()), 1, ) diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs index 3c324359565c1..39ca675853687 100644 --- a/compiler/rustc_codegen_llvm/src/consts.rs +++ b/compiler/rustc_codegen_llvm/src/consts.rs @@ -114,7 +114,7 @@ pub fn const_alloc_to_llvm<'ll>(cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation< value: Primitive::Pointer, valid_range: WrappingRange::full(dl.pointer_size), }, - cx.type_i8p_ext(address_space), + cx.type_ptr_in(address_space), )); next_offset = offset + pointer_size; } @@ -267,7 +267,7 @@ impl<'ll> CodegenCx<'ll, '_> { let g = if def_id.is_local() && !self.tcx.is_foreign_item(def_id) { let llty = self.layout_of(ty).llvm_type(self); if let Some(g) = self.get_declared_value(sym) { - if self.val_ty(g) != self.type_ptr_to(llty) { + if self.val_ty(g) != self.type_ptr() { span_bug!(self.tcx.def_span(def_id), "Conflicting types for static"); } } @@ -570,16 +570,16 @@ impl<'ll> StaticMethods for CodegenCx<'ll, '_> { } } - /// Add a global value to a list to be stored in the `llvm.used` variable, an array of i8*. + /// Add a global value to a list to be stored in the `llvm.used` variable, an array of ptr. fn add_used_global(&self, global: &'ll Value) { - let cast = unsafe { llvm::LLVMConstPointerCast(global, self.type_i8p()) }; + let cast = unsafe { llvm::LLVMConstPointerCast(global, self.type_ptr()) }; self.used_statics.borrow_mut().push(cast); } /// Add a global value to a list to be stored in the `llvm.compiler.used` variable, - /// an array of i8*. + /// an array of ptr. 
fn add_compiler_used_global(&self, global: &'ll Value) { - let cast = unsafe { llvm::LLVMConstPointerCast(global, self.type_i8p()) }; + let cast = unsafe { llvm::LLVMConstPointerCast(global, self.type_ptr()) }; self.compiler_used_statics.borrow_mut().push(cast); } } diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs index 4dcc7cd54477d..16d1a495e6550 100644 --- a/compiler/rustc_codegen_llvm/src/context.rs +++ b/compiler/rustc_codegen_llvm/src/context.rs @@ -466,7 +466,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { pub(crate) fn create_used_variable_impl(&self, name: &'static CStr, values: &[&'ll Value]) { let section = cstr!("llvm.metadata"); - let array = self.const_array(self.type_ptr_to(self.type_i8()), values); + let array = self.const_array(self.type_ptr(), values); unsafe { let g = llvm::LLVMAddGlobal(self.llmod, self.val_ty(array), name.as_ptr()); @@ -640,7 +640,7 @@ impl<'ll> CodegenCx<'ll, '_> { ($($field_ty:expr),*) => (self.type_struct( &[$($field_ty),*], false)) } - let i8p = self.type_i8p(); + let ptr = self.type_ptr(); let void = self.type_void(); let i1 = self.type_i1(); let t_i8 = self.type_i8(); @@ -686,7 +686,7 @@ impl<'ll> CodegenCx<'ll, '_> { ifn!("llvm.trap", fn() -> void); ifn!("llvm.debugtrap", fn() -> void); - ifn!("llvm.frameaddress", fn(t_i32) -> i8p); + ifn!("llvm.frameaddress", fn(t_i32) -> ptr); ifn!("llvm.powi.f32", fn(t_f32, t_i32) -> t_f32); ifn!("llvm.powi.f64", fn(t_f64, t_i32) -> t_f64); @@ -849,43 +849,43 @@ impl<'ll> CodegenCx<'ll, '_> { ifn!("llvm.usub.sat.i64", fn(t_i64, t_i64) -> t_i64); ifn!("llvm.usub.sat.i128", fn(t_i128, t_i128) -> t_i128); - ifn!("llvm.lifetime.start.p0i8", fn(t_i64, i8p) -> void); - ifn!("llvm.lifetime.end.p0i8", fn(t_i64, i8p) -> void); + ifn!("llvm.lifetime.start.p0i8", fn(t_i64, ptr) -> void); + ifn!("llvm.lifetime.end.p0i8", fn(t_i64, ptr) -> void); ifn!("llvm.expect.i1", fn(i1, i1) -> i1); - ifn!("llvm.eh.typeid.for", fn(i8p) -> t_i32); + ifn!("llvm.eh.typeid.for", fn(ptr) -> t_i32); ifn!("llvm.localescape", fn(...) -> void); - ifn!("llvm.localrecover", fn(i8p, i8p, t_i32) -> i8p); - ifn!("llvm.x86.seh.recoverfp", fn(i8p, i8p) -> i8p); + ifn!("llvm.localrecover", fn(ptr, ptr, t_i32) -> ptr); + ifn!("llvm.x86.seh.recoverfp", fn(ptr, ptr) -> ptr); ifn!("llvm.assume", fn(i1) -> void); - ifn!("llvm.prefetch", fn(i8p, t_i32, t_i32, t_i32) -> void); + ifn!("llvm.prefetch", fn(ptr, t_i32, t_i32, t_i32) -> void); // This isn't an "LLVM intrinsic", but LLVM's optimization passes // recognize it like one and we assume it exists in `core::slice::cmp` match self.sess().target.arch.as_ref() { - "avr" | "msp430" => ifn!("memcmp", fn(i8p, i8p, t_isize) -> t_i16), - _ => ifn!("memcmp", fn(i8p, i8p, t_isize) -> t_i32), + "avr" | "msp430" => ifn!("memcmp", fn(ptr, ptr, t_isize) -> t_i16), + _ => ifn!("memcmp", fn(ptr, ptr, t_isize) -> t_i32), } // variadic intrinsics - ifn!("llvm.va_start", fn(i8p) -> void); - ifn!("llvm.va_end", fn(i8p) -> void); - ifn!("llvm.va_copy", fn(i8p, i8p) -> void); + ifn!("llvm.va_start", fn(ptr) -> void); + ifn!("llvm.va_end", fn(ptr) -> void); + ifn!("llvm.va_copy", fn(ptr, ptr) -> void); if self.sess().instrument_coverage() { - ifn!("llvm.instrprof.increment", fn(i8p, t_i64, t_i32, t_i32) -> void); + ifn!("llvm.instrprof.increment", fn(ptr, t_i64, t_i32, t_i32) -> void); } - ifn!("llvm.type.test", fn(i8p, t_metadata) -> i1); - ifn!("llvm.type.checked.load", fn(i8p, t_i32, t_metadata) -> mk_struct! 
{i8p, i1}); + ifn!("llvm.type.test", fn(ptr, t_metadata) -> i1); + ifn!("llvm.type.checked.load", fn(ptr, t_i32, t_metadata) -> mk_struct! {ptr, i1}); if self.sess().opts.debuginfo != DebugInfo::None { ifn!("llvm.dbg.declare", fn(t_metadata, t_metadata) -> void); ifn!("llvm.dbg.value", fn(t_metadata, t_i64, t_metadata) -> void); } - ifn!("llvm.ptrmask", fn(i8p, t_isize) -> i8p); + ifn!("llvm.ptrmask", fn(ptr, t_isize) -> ptr); None } @@ -899,12 +899,11 @@ impl<'ll> CodegenCx<'ll, '_> { let eh_catch_typeinfo = match tcx.lang_items().eh_catch_typeinfo() { Some(def_id) => self.get_static(def_id), _ => { - let ty = self - .type_struct(&[self.type_ptr_to(self.type_isize()), self.type_i8p()], false); + let ty = self.type_struct(&[self.type_ptr(), self.type_ptr()], false); self.declare_global("rust_eh_catch_typeinfo", ty) } }; - let eh_catch_typeinfo = self.const_bitcast(eh_catch_typeinfo, self.type_i8p()); + let eh_catch_typeinfo = self.const_bitcast(eh_catch_typeinfo, self.type_ptr()); self.eh_catch_typeinfo.set(Some(eh_catch_typeinfo)); eh_catch_typeinfo } diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs index 80fd9726fc780..d8a1987b1c9ba 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs @@ -19,7 +19,7 @@ use rustc_span::DebuggerVisualizerType; pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &mut Builder<'_, '_, '_>) { if needs_gdb_debug_scripts_section(bx) { let gdb_debug_scripts_section = - bx.const_bitcast(get_or_insert_gdb_debug_scripts_section_global(bx), bx.type_i8p()); + bx.const_bitcast(get_or_insert_gdb_debug_scripts_section_global(bx), bx.type_ptr()); // Load just the first byte as that's all that's necessary to force // LLVM to keep around the reference to the global. 
let volative_load_instruction = bx.volatile_load(bx.type_i8(), gdb_debug_scripts_section); diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs index 2f5dd519b2600..c2de779687df2 100644 --- a/compiler/rustc_codegen_llvm/src/intrinsic.rs +++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs @@ -165,7 +165,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> { let ptr = args[0].immediate(); let load = if let PassMode::Cast(ty, _) = &fn_abi.ret.mode { let llty = ty.llvm_type(self); - let ptr = self.pointercast(ptr, self.type_ptr_to(llty)); + let ptr = self.pointercast(ptr, self.type_ptr()); self.volatile_load(llty, ptr) } else { self.volatile_load(self.layout_of(tp_ty).llvm_type(self), ptr) @@ -319,16 +319,16 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> { self.const_bool(true) } else if use_integer_compare { let integer_ty = self.type_ix(layout.size().bits()); - let ptr_ty = self.type_ptr_to(integer_ty); + let ptr_ty = self.type_ptr(); let a_ptr = self.bitcast(a, ptr_ty); let a_val = self.load(integer_ty, a_ptr, layout.align().abi); let b_ptr = self.bitcast(b, ptr_ty); let b_val = self.load(integer_ty, b_ptr, layout.align().abi); self.icmp(IntPredicate::IntEQ, a_val, b_val) } else { - let i8p_ty = self.type_i8p(); - let a_ptr = self.bitcast(a, i8p_ty); - let b_ptr = self.bitcast(b, i8p_ty); + let ptr_ty = self.type_ptr(); + let a_ptr = self.bitcast(a, ptr_ty); + let b_ptr = self.bitcast(b, ptr_ty); let n = self.const_usize(layout.size().bytes()); let cmp = self.call_intrinsic("memcmp", &[a_ptr, b_ptr, n]); match self.cx.sess().target.arch.as_ref() { @@ -385,8 +385,8 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> { }; if !fn_abi.ret.is_ignore() { - if let PassMode::Cast(ty, _) = &fn_abi.ret.mode { - let ptr_llty = self.type_ptr_to(ty.llvm_type(self)); + if let PassMode::Cast(_, _) = &fn_abi.ret.mode { + let ptr_llty = self.type_ptr(); let ptr = self.pointercast(result.llval, ptr_llty); self.store(llval, ptr, result.align); } else { @@ -412,8 +412,8 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> { fn type_test(&mut self, pointer: Self::Value, typeid: Self::Value) -> Self::Value { // Test the called operand using llvm.type.test intrinsic. The LowerTypeTests link-time // optimization pass replaces calls to this intrinsic with code to test type membership. - let i8p_ty = self.type_i8p(); - let bitcast = self.bitcast(pointer, i8p_ty); + let ptr_ty = self.type_ptr(); + let bitcast = self.bitcast(pointer, ptr_ty); self.call_intrinsic("llvm.type.test", &[bitcast, typeid]) } @@ -444,7 +444,7 @@ fn try_intrinsic<'ll>( dest: &'ll Value, ) { if bx.sess().panic_strategy() == PanicStrategy::Abort { - let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void()); + let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void()); bx.call(try_func_ty, None, try_func, &[data], None); // Return 0 unconditionally from the intrinsic call; // we can never unwind. @@ -542,8 +542,8 @@ fn codegen_msvc_try<'ll>( // // More information can be found in libstd's seh.rs implementation. 
let ptr_align = bx.tcx().data_layout.pointer_align.abi; - let slot = bx.alloca(bx.type_i8p(), ptr_align); - let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void()); + let slot = bx.alloca(bx.type_ptr(), ptr_align); + let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void()); bx.invoke(try_func_ty, None, try_func, &[data], normal, catchswitch, None); bx.switch_to_block(normal); @@ -566,10 +566,10 @@ fn codegen_msvc_try<'ll>( // // When modifying, make sure that the type_name string exactly matches // the one used in src/libpanic_unwind/seh.rs. - let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_i8p()); + let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_ptr()); let type_name = bx.const_bytes(b"rust_panic\0"); let type_info = - bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_i8p()), type_name], false); + bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_ptr()), type_name], false); let tydesc = bx.declare_global("__rust_panic_type_info", bx.val_ty(type_info)); unsafe { llvm::LLVMRustSetLinkage(tydesc, llvm::Linkage::LinkOnceODRLinkage); @@ -586,15 +586,15 @@ fn codegen_msvc_try<'ll>( bx.switch_to_block(catchpad_rust); let flags = bx.const_i32(8); let funclet = bx.catch_pad(cs, &[tydesc, flags, slot]); - let ptr = bx.load(bx.type_i8p(), slot, ptr_align); - let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void()); + let ptr = bx.load(bx.type_ptr(), slot, ptr_align); + let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void()); bx.call(catch_ty, None, catch_func, &[data, ptr], Some(&funclet)); bx.catch_ret(&funclet, caught); // The flag value of 64 indicates a "catch-all". bx.switch_to_block(catchpad_foreign); let flags = bx.const_i32(64); - let null = bx.const_null(bx.type_i8p()); + let null = bx.const_null(bx.type_ptr()); let funclet = bx.catch_pad(cs, &[null, flags, null]); bx.call(catch_ty, None, catch_func, &[data, null], Some(&funclet)); bx.catch_ret(&funclet, caught); @@ -647,7 +647,7 @@ fn codegen_gnu_try<'ll>( let try_func = llvm::get_param(bx.llfn(), 0); let data = llvm::get_param(bx.llfn(), 1); let catch_func = llvm::get_param(bx.llfn(), 2); - let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void()); + let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void()); bx.invoke(try_func_ty, None, try_func, &[data], then, catch, None); bx.switch_to_block(then); @@ -660,12 +660,12 @@ fn codegen_gnu_try<'ll>( // the landing pad clauses the exception's type had been matched to. // rust_try ignores the selector. 
bx.switch_to_block(catch); - let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false); + let lpad_ty = bx.type_struct(&[bx.type_ptr(), bx.type_i32()], false); let vals = bx.landing_pad(lpad_ty, bx.eh_personality(), 1); - let tydesc = bx.const_null(bx.type_i8p()); + let tydesc = bx.const_null(bx.type_ptr()); bx.add_clause(vals, tydesc); let ptr = bx.extract_value(vals, 0); - let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void()); + let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void()); bx.call(catch_ty, None, catch_func, &[data, ptr], None); bx.ret(bx.const_i32(1)); }); @@ -711,7 +711,7 @@ fn codegen_emcc_try<'ll>( let try_func = llvm::get_param(bx.llfn(), 0); let data = llvm::get_param(bx.llfn(), 1); let catch_func = llvm::get_param(bx.llfn(), 2); - let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void()); + let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void()); bx.invoke(try_func_ty, None, try_func, &[data], then, catch, None); bx.switch_to_block(then); @@ -724,10 +724,10 @@ fn codegen_emcc_try<'ll>( // the landing pad clauses the exception's type had been matched to. bx.switch_to_block(catch); let tydesc = bx.eh_catch_typeinfo(); - let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false); + let lpad_ty = bx.type_struct(&[bx.type_ptr(), bx.type_i32()], false); let vals = bx.landing_pad(lpad_ty, bx.eh_personality(), 2); bx.add_clause(vals, tydesc); - bx.add_clause(vals, bx.const_null(bx.type_i8p())); + bx.add_clause(vals, bx.const_null(bx.type_ptr())); let ptr = bx.extract_value(vals, 0); let selector = bx.extract_value(vals, 1); @@ -740,7 +740,7 @@ fn codegen_emcc_try<'ll>( // create an alloca and pass a pointer to that. let ptr_align = bx.tcx().data_layout.pointer_align.abi; let i8_align = bx.tcx().data_layout.i8_align.abi; - let catch_data_type = bx.type_struct(&[bx.type_i8p(), bx.type_bool()], false); + let catch_data_type = bx.type_struct(&[bx.type_ptr(), bx.type_bool()], false); let catch_data = bx.alloca(catch_data_type, ptr_align); let catch_data_0 = bx.inbounds_gep(catch_data_type, catch_data, &[bx.const_usize(0), bx.const_usize(0)]); @@ -748,9 +748,9 @@ fn codegen_emcc_try<'ll>( let catch_data_1 = bx.inbounds_gep(catch_data_type, catch_data, &[bx.const_usize(0), bx.const_usize(1)]); bx.store(is_rust_panic, catch_data_1, i8_align); - let catch_data = bx.bitcast(catch_data, bx.type_i8p()); + let catch_data = bx.bitcast(catch_data, bx.type_ptr()); - let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void()); + let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void()); bx.call(catch_ty, None, catch_func, &[data, catch_data], None); bx.ret(bx.const_i32(1)); }); @@ -897,7 +897,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>( let place = PlaceRef::alloca(bx, args[0].layout); args[0].val.store(bx, place); let int_ty = bx.type_ix(expected_bytes * 8); - let ptr = bx.pointercast(place.llval, bx.cx.type_ptr_to(int_ty)); + let ptr = bx.pointercast(place.llval, bx.cx.type_ptr()); bx.load(int_ty, ptr, Align::ONE) } _ => return_error!( @@ -1145,7 +1145,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>( let ptr = bx.alloca(bx.type_ix(expected_bytes * 8), Align::ONE); bx.store(ze, ptr, Align::ONE); let array_ty = bx.type_array(bx.type_i8(), expected_bytes); - let ptr = bx.pointercast(ptr, bx.cx.type_ptr_to(array_ty)); + let ptr = bx.pointercast(ptr, bx.cx.type_ptr()); return Ok(bx.load(array_ty, ptr, Align::ONE)); } _ => return_error!( @@ -1293,7 +1293,7 @@ fn generic_simd_intrinsic<'ll, 
'tcx>( cx: &CodegenCx<'ll, '_>, elem_ty: Ty<'_>, vec_len: u64, - mut no_pointers: usize, + no_pointers: usize, ) -> &'ll Type { // FIXME: use cx.layout_of(ty).llvm_type() ? let mut elem_ty = match *elem_ty.kind() { @@ -1302,9 +1302,8 @@ fn generic_simd_intrinsic<'ll, 'tcx>( ty::Float(v) => cx.type_float_from_ty(v), _ => unreachable!(), }; - while no_pointers > 0 { - elem_ty = cx.type_ptr_to(elem_ty); - no_pointers -= 1; + if no_pointers > 0 { + elem_ty = cx.type_ptr(); } cx.type_vector(elem_ty, vec_len) } diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs index 8a9392255b861..8097725834227 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs @@ -1044,7 +1044,7 @@ extern "C" { // Operations on array, pointer, and vector types (sequence types) pub fn LLVMRustArrayType(ElementType: &Type, ElementCount: u64) -> &Type; - pub fn LLVMPointerType(ElementType: &Type, AddressSpace: c_uint) -> &Type; + pub fn LLVMPointerTypeInContext(C: &Context, AddressSpace: c_uint) -> &Type; pub fn LLVMVectorType(ElementType: &Type, ElementCount: c_uint) -> &Type; pub fn LLVMGetElementType(Ty: &Type) -> &Type; diff --git a/compiler/rustc_codegen_llvm/src/type_.rs b/compiler/rustc_codegen_llvm/src/type_.rs index 5772b7e1d812a..4108e326e188f 100644 --- a/compiler/rustc_codegen_llvm/src/type_.rs +++ b/compiler/rustc_codegen_llvm/src/type_.rs @@ -108,12 +108,6 @@ impl<'ll> CodegenCx<'ll, '_> { } } - pub(crate) fn type_pointee_for_align(&self, align: Align) -> &'ll Type { - // FIXME(eddyb) We could find a better approximation if ity.align < align. - let ity = Integer::approximate_align(self, align); - self.type_from_integer(ity) - } - /// Return a LLVM type that has at most the required alignment, /// and exactly the required size, as a best-effort padding array. 
pub(crate) fn type_padding_filler(&self, size: Size, align: Align) -> &'ll Type { @@ -185,17 +179,12 @@ impl<'ll, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> { unsafe { llvm::LLVMRustGetTypeKind(ty).to_generic() } } - fn type_ptr_to(&self, ty: &'ll Type) -> &'ll Type { - assert_ne!( - self.type_kind(ty), - TypeKind::Function, - "don't call ptr_to on function types, use ptr_to_llvm_type on FnAbi instead or explicitly specify an address space if it makes sense" - ); - ty.ptr_to(AddressSpace::DATA) + fn type_ptr(&self) -> &'ll Type { + self.type_ptr_in(AddressSpace::DATA) } - fn type_ptr_to_ext(&self, ty: &'ll Type, address_space: AddressSpace) -> &'ll Type { - ty.ptr_to(address_space) + fn type_ptr_in(&self, address_space: AddressSpace) -> &'ll Type { + unsafe { llvm::LLVMPointerTypeInContext(self.llcx, address_space.0) } } fn element_type(&self, ty: &'ll Type) -> &'ll Type { @@ -243,12 +232,8 @@ impl Type { unsafe { llvm::LLVMIntTypeInContext(llcx, num_bits as c_uint) } } - pub fn i8p_llcx(llcx: &llvm::Context) -> &Type { - Type::i8_llcx(llcx).ptr_to(AddressSpace::DATA) - } - - fn ptr_to(&self, address_space: AddressSpace) -> &Type { - unsafe { llvm::LLVMPointerType(self, address_space.0) } + pub fn ptr_llcx(llcx: &llvm::Context) -> &Type { + unsafe { llvm::LLVMPointerTypeInContext(llcx, AddressSpace::DATA.0) } } } diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs index 182adf8178571..4248063312191 100644 --- a/compiler/rustc_codegen_llvm/src/type_of.rs +++ b/compiler/rustc_codegen_llvm/src/type_of.rs @@ -232,12 +232,8 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> { return llty; } let llty = match *self.ty.kind() { - ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) => { - cx.type_ptr_to(cx.layout_of(ty).llvm_type(cx)) - } - ty::Adt(def, _) if def.is_box() => { - cx.type_ptr_to(cx.layout_of(self.ty.boxed_ty()).llvm_type(cx)) - } + ty::Ref(..) | ty::RawPtr(ty::TypeAndMut { .. }) => cx.type_ptr(), + ty::Adt(def, _) if def.is_box() => cx.type_ptr(), ty::FnPtr(sig) => { cx.fn_ptr_backend_type(cx.fn_abi_of_fn_ptr(sig, ty::List::empty())) } @@ -313,14 +309,12 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> { F32 => cx.type_f32(), F64 => cx.type_f64(), Pointer => { - // If we know the alignment, pick something better than i8. 
- let (pointee, address_space) = - if let Some(pointee) = self.pointee_info_at(cx, offset) { - (cx.type_pointee_for_align(pointee.align), pointee.address_space) - } else { - (cx.type_i8(), AddressSpace::DATA) - }; - cx.type_ptr_to_ext(pointee, address_space) + let address_space = if let Some(pointee) = self.pointee_info_at(cx, offset) { + pointee.address_space + } else { + AddressSpace::DATA + }; + cx.type_ptr_in(address_space) } } } diff --git a/compiler/rustc_codegen_llvm/src/va_arg.rs b/compiler/rustc_codegen_llvm/src/va_arg.rs index ceb3d5a84abf3..c1c1786f6a803 100644 --- a/compiler/rustc_codegen_llvm/src/va_arg.rs +++ b/compiler/rustc_codegen_llvm/src/va_arg.rs @@ -5,7 +5,7 @@ use crate::value::Value; use rustc_codegen_ssa::mir::operand::OperandRef; use rustc_codegen_ssa::{ common::IntPredicate, - traits::{BaseTypeMethods, BuilderMethods, ConstMethods, DerivedTypeMethods}, + traits::{BaseTypeMethods, BuilderMethods, ConstMethods}, }; use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf}; use rustc_middle::ty::Ty; @@ -26,14 +26,13 @@ fn round_pointer_up_to_alignment<'ll>( fn emit_direct_ptr_va_arg<'ll, 'tcx>( bx: &mut Builder<'_, 'll, 'tcx>, list: OperandRef<'tcx, &'ll Value>, - llty: &'ll Type, size: Size, align: Align, slot_size: Align, allow_higher_align: bool, ) -> (&'ll Value, Align) { - let va_list_ty = bx.type_i8p(); - let va_list_ptr_ty = bx.type_ptr_to(va_list_ty); + let va_list_ty = bx.type_ptr(); + let va_list_ptr_ty = bx.type_ptr(); let va_list_addr = if list.layout.llvm_type(bx.cx) != va_list_ptr_ty { bx.bitcast(list.immediate(), va_list_ptr_ty) } else { @@ -43,7 +42,7 @@ fn emit_direct_ptr_va_arg<'ll, 'tcx>( let ptr = bx.load(va_list_ty, va_list_addr, bx.tcx().data_layout.pointer_align.abi); let (addr, addr_align) = if allow_higher_align && align > slot_size { - (round_pointer_up_to_alignment(bx, ptr, align, bx.cx().type_i8p()), align) + (round_pointer_up_to_alignment(bx, ptr, align, bx.type_ptr()), align) } else { (ptr, slot_size) }; @@ -56,9 +55,9 @@ fn emit_direct_ptr_va_arg<'ll, 'tcx>( if size.bytes() < slot_size.bytes() && bx.tcx().sess.target.endian == Endian::Big { let adjusted_size = bx.cx().const_i32((slot_size.bytes() - size.bytes()) as i32); let adjusted = bx.inbounds_gep(bx.type_i8(), addr, &[adjusted_size]); - (bx.bitcast(adjusted, bx.cx().type_ptr_to(llty)), addr_align) + (bx.bitcast(adjusted, bx.type_ptr()), addr_align) } else { - (bx.bitcast(addr, bx.cx().type_ptr_to(llty)), addr_align) + (bx.bitcast(addr, bx.type_ptr()), addr_align) } } @@ -81,7 +80,7 @@ fn emit_ptr_va_arg<'ll, 'tcx>( (layout.llvm_type(bx.cx), layout.size, layout.align) }; let (addr, addr_align) = - emit_direct_ptr_va_arg(bx, list, llty, size, align.abi, slot_size, allow_higher_align); + emit_direct_ptr_va_arg(bx, list, size, align.abi, slot_size, allow_higher_align); if indirect { let tmp_ret = bx.load(llty, addr, addr_align); bx.load(bx.cx.layout_of(target_ty).llvm_type(bx.cx), tmp_ret, align.abi) @@ -146,7 +145,7 @@ fn emit_aapcs_va_arg<'ll, 'tcx>( bx.cond_br(use_stack, on_stack, in_reg); bx.switch_to_block(in_reg); - let top_type = bx.type_i8p(); + let top_type = bx.type_ptr(); let top = bx.struct_gep(va_list_ty, va_list_addr, reg_top_index); let top = bx.load(top_type, top, bx.tcx().data_layout.pointer_align.abi); @@ -158,7 +157,7 @@ fn emit_aapcs_va_arg<'ll, 'tcx>( reg_addr = bx.gep(bx.type_i8(), reg_addr, &[offset]); } let reg_type = layout.llvm_type(bx); - let reg_addr = bx.bitcast(reg_addr, bx.cx.type_ptr_to(reg_type)); + let reg_addr = bx.bitcast(reg_addr, bx.type_ptr()); let 
reg_value = bx.load(reg_type, reg_addr, layout.align.abi); bx.br(end); diff --git a/compiler/rustc_codegen_ssa/src/base.rs b/compiler/rustc_codegen_ssa/src/base.rs index 4f396e970ad70..d7084fa714f5c 100644 --- a/compiler/rustc_codegen_ssa/src/base.rs +++ b/compiler/rustc_codegen_ssa/src/base.rs @@ -169,10 +169,10 @@ pub fn unsized_info<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( cx.tcx().vtable_trait_upcasting_coercion_new_vptr_slot((source, target)); if let Some(entry_idx) = vptr_entry_idx { - let ptr_ty = cx.type_i8p(); + let ptr_ty = cx.type_ptr(); let ptr_align = cx.tcx().data_layout.pointer_align.abi; let vtable_ptr_ty = vtable_ptr_ty(cx, target, target_dyn_kind); - let llvtable = bx.pointercast(old_info, bx.type_ptr_to(ptr_ty)); + let llvtable = bx.pointercast(old_info, bx.type_ptr()); let gep = bx.inbounds_gep( ptr_ty, llvtable, @@ -226,7 +226,7 @@ pub fn unsize_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( (&ty::Ref(_, a, _), &ty::Ref(_, b, _) | &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) | (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }), &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => { assert_eq!(bx.cx().type_is_sized(a), old_info.is_none()); - let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(bx.cx().layout_of(b))); + let ptr_ty = bx.type_ptr(); (bx.pointercast(src, ptr_ty), unsized_info(bx, a, b, old_info)) } (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => { @@ -433,7 +433,7 @@ pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( // The entry function is either `int main(void)` or `int main(int argc, char **argv)`, // depending on whether the target needs `argc` and `argv` to be passed in. let llfty = if cx.sess().target.main_needs_argc_argv { - cx.type_func(&[cx.type_int(), cx.type_ptr_to(cx.type_i8p())], cx.type_int()) + cx.type_func(&[cx.type_int(), cx.type_ptr()], cx.type_int()) } else { cx.type_func(&[], cx.type_int()) }; @@ -470,7 +470,7 @@ pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( bx.insert_reference_to_gdb_debug_scripts_section_global(); let isize_ty = cx.type_isize(); - let i8pp_ty = cx.type_ptr_to(cx.type_i8p()); + let i8pp_ty = cx.type_ptr(); let (arg_argc, arg_argv) = get_argc_argv(cx, &mut bx); let (start_fn, start_ty, args) = if let EntryFnType::Main { sigpipe } = entry_type { @@ -521,7 +521,7 @@ fn get_argc_argv<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( } else { // The Rust start function doesn't need `argc` and `argv`, so just pass zeros. let arg_argc = bx.const_int(cx.type_int(), 0); - let arg_argv = bx.const_null(cx.type_ptr_to(cx.type_i8p())); + let arg_argv = bx.const_null(cx.type_ptr()); (arg_argc, arg_argv) } } diff --git a/compiler/rustc_codegen_ssa/src/meth.rs b/compiler/rustc_codegen_ssa/src/meth.rs index cae46ebd2e9a7..24fede6c396e3 100644 --- a/compiler/rustc_codegen_ssa/src/meth.rs +++ b/compiler/rustc_codegen_ssa/src/meth.rs @@ -23,7 +23,7 @@ impl<'a, 'tcx> VirtualIndex { // Load the data pointer from the object. 
debug!("get_fn({llvtable:?}, {ty:?}, {self:?})"); let llty = bx.fn_ptr_backend_type(fn_abi); - let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(llty)); + let llvtable = bx.pointercast(llvtable, bx.type_ptr()); if bx.cx().sess().opts.unstable_opts.virtual_function_elimination && bx.cx().sess().lto() == Lto::Fat @@ -54,7 +54,7 @@ impl<'a, 'tcx> VirtualIndex { debug!("get_int({:?}, {:?})", llvtable, self); let llty = bx.type_isize(); - let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(llty)); + let llvtable = bx.pointercast(llvtable, bx.type_ptr()); let usize_align = bx.tcx().data_layout.pointer_align.abi; let gep = bx.inbounds_gep(llty, llvtable, &[bx.const_usize(self.0)]); let ptr = bx.load(llty, gep, usize_align); diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs index 03d833fbba87c..c0cfdccbe917e 100644 --- a/compiler/rustc_codegen_ssa/src/mir/block.rs +++ b/compiler/rustc_codegen_ssa/src/mir/block.rs @@ -426,7 +426,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } }; let ty = bx.cast_backend_type(cast_ty); - let addr = bx.pointercast(llslot, bx.type_ptr_to(ty)); + let addr = bx.pointercast(llslot, bx.type_ptr()); bx.load(ty, addr, self.fn_abi.ret.layout.align.abi) } }; @@ -529,7 +529,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let data_ty = bx.cx().backend_type(place.layout); let vtable_ptr = bx.gep(data_ty, data, &[bx.cx().const_i32(0), bx.cx().const_i32(1)]); - let vtable = bx.load(bx.type_i8p(), vtable_ptr, abi::Align::ONE); + let vtable = bx.load(bx.type_ptr(), vtable_ptr, abi::Align::ONE); // Truncate vtable off of args list args = &args[..1]; debug!("args' = {:?}", args); @@ -857,9 +857,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { Some(intrinsic) => { let dest = match ret_dest { _ if fn_abi.ret.is_indirect() => llargs[0], - ReturnDest::Nothing => { - bx.const_undef(bx.type_ptr_to(bx.arg_memory_ty(&fn_abi.ret))) - } + ReturnDest::Nothing => bx.const_undef(bx.type_ptr()), ReturnDest::IndirectOperand(dst, _) | ReturnDest::Store(dst) => dst.llval, ReturnDest::DirectOperand(_) => { bug!("Cannot use direct operand with an intrinsic call") @@ -1448,7 +1446,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // Have to load the argument, maybe while casting it. if let PassMode::Cast(ty, _) = &arg.mode { let llty = bx.cast_backend_type(ty); - let addr = bx.pointercast(llval, bx.type_ptr_to(llty)); + let addr = bx.pointercast(llval, bx.type_ptr()); llval = bx.load(llty, addr, align.min(arg.layout.align.abi)); } else { // We can't use `PlaceRef::load` here because the argument @@ -1613,7 +1611,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // represents that this is a catch-all block. 
let mut cp_bx = Bx::build(self.cx, cp_llbb); let null = cp_bx.const_null( - cp_bx.type_i8p_ext(cp_bx.cx().data_layout().instruction_address_space), + cp_bx.type_ptr_in(cp_bx.cx().data_layout().instruction_address_space), ); let sixty_four = cp_bx.const_i32(64); funclet = cp_bx.catch_pad(cs, &[null, sixty_four, null]); @@ -1650,7 +1648,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { fn landing_pad_type(&self) -> Bx::Type { let cx = self.cx; - cx.type_struct(&[cx.type_i8p(), cx.type_i32()], false) + cx.type_struct(&[cx.type_ptr(), cx.type_i32()], false) } fn unreachable_block(&mut self) -> Bx::BasicBlock { @@ -1828,8 +1826,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { _ => {} } - let llty = bx.backend_type(src.layout); - let cast_ptr = bx.pointercast(dst.llval, bx.type_ptr_to(llty)); + let cast_ptr = bx.pointercast(dst.llval, bx.type_ptr()); let align = src.layout.align.abi.min(dst.align); src.val.store(bx, PlaceRef::new_sized_aligned(cast_ptr, src.layout, align)); } diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs index 215edbe02c08e..dc683b510ee5a 100644 --- a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs +++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs @@ -381,7 +381,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { sym::const_allocate => { // returns a null pointer at runtime. - bx.const_null(bx.type_i8p()) + bx.const_null(bx.type_ptr()) } sym::const_deallocate => { @@ -435,7 +435,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { if ty.is_unsafe_ptr() { // Some platforms do not support atomic operations on pointers, // so we cast to integer first. - let ptr_llty = bx.type_ptr_to(bx.type_isize()); + let ptr_llty = bx.type_ptr(); dst = bx.pointercast(dst, ptr_llty); cmp = bx.ptrtoint(cmp, bx.type_isize()); src = bx.ptrtoint(src, bx.type_isize()); @@ -466,7 +466,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // Some platforms do not support atomic operations on pointers, // so we cast to integer first... let llty = bx.type_isize(); - let ptr_llty = bx.type_ptr_to(llty); + let ptr_llty = bx.type_ptr(); source = bx.pointercast(source, ptr_llty); let result = bx.atomic_load(llty, source, parse_ordering(bx, ordering), size); // ... and then cast the result back to a pointer @@ -488,7 +488,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { if ty.is_unsafe_ptr() { // Some platforms do not support atomic operations on pointers, // so we cast to integer first. - let ptr_llty = bx.type_ptr_to(bx.type_isize()); + let ptr_llty = bx.type_ptr(); ptr = bx.pointercast(ptr, ptr_llty); val = bx.ptrtoint(val, bx.type_isize()); } @@ -533,7 +533,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { if ty.is_unsafe_ptr() { // Some platforms do not support atomic operations on pointers, // so we cast to integer first. 
- let ptr_llty = bx.type_ptr_to(bx.type_isize()); + let ptr_llty = bx.type_ptr(); ptr = bx.pointercast(ptr, ptr_llty); val = bx.ptrtoint(val, bx.type_isize()); } @@ -589,8 +589,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { }; if !fn_abi.ret.is_ignore() { - if let PassMode::Cast(ty, _) = &fn_abi.ret.mode { - let ptr_llty = bx.type_ptr_to(bx.cast_backend_type(ty)); + if let PassMode::Cast(_, _) = &fn_abi.ret.mode { + let ptr_llty = bx.type_ptr(); let ptr = bx.pointercast(result.llval, ptr_llty); bx.store(llval, ptr, result.align); } else { diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs index 34a5b638d7eba..763ccea84a2fc 100644 --- a/compiler/rustc_codegen_ssa/src/mir/operand.rs +++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs @@ -292,7 +292,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue { if flags.contains(MemFlags::NONTEMPORAL) { // HACK(nox): This is inefficient but there is no nontemporal memcpy. let ty = bx.backend_type(dest.layout); - let ptr = bx.pointercast(r, bx.type_ptr_to(ty)); + let ptr = bx.pointercast(r, bx.type_ptr()); let val = bx.load(ty, ptr, source_align); bx.store_with_flags(val, dest.llval, dest.align, flags); return; diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs index fbe30154a7c8d..b08ea5998a1d9 100644 --- a/compiler/rustc_codegen_ssa/src/mir/place.rs +++ b/compiler/rustc_codegen_ssa/src/mir/place.rs @@ -107,7 +107,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> { } Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } if field.is_zst() => { // ZST fields are not included in Scalar, ScalarPair, and Vector layouts, so manually offset the pointer. - let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p()); + let byte_ptr = bx.pointercast(self.llval, bx.type_ptr()); bx.gep(bx.cx().type_i8(), byte_ptr, &[bx.const_usize(offset.bytes())]) } Abi::Scalar(_) | Abi::ScalarPair(..) => { @@ -126,7 +126,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> { }; PlaceRef { // HACK(eddyb): have to bitcast pointers until LLVM removes pointee types. - llval: bx.pointercast(llval, bx.cx().type_ptr_to(bx.cx().backend_type(field))), + llval: bx.pointercast(llval, bx.type_ptr()), llextra: if bx.cx().type_has_metadata(field.ty) { self.llextra } else { None }, layout: field, align: effective_field_align, @@ -187,7 +187,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> { debug!("struct_field_ptr: DST field offset: {:?}", offset); // Cast and adjust pointer. - let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p()); + let byte_ptr = bx.pointercast(self.llval, bx.type_ptr()); let byte_ptr = bx.gep(bx.cx().type_i8(), byte_ptr, &[offset]); // Finally, cast back to the type expected. @@ -195,7 +195,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> { debug!("struct_field_ptr: Field type is {:?}", ll_fty); PlaceRef { - llval: bx.pointercast(byte_ptr, bx.cx().type_ptr_to(ll_fty)), + llval: bx.pointercast(byte_ptr, bx.type_ptr()), llextra: self.llextra, layout: field, align: effective_field_align, @@ -499,8 +499,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> { downcast.layout = self.layout.for_variant(bx.cx(), variant_index); // Cast to the appropriate variant struct type. 
- let variant_ty = bx.cx().backend_type(downcast.layout); - downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty)); + downcast.llval = bx.pointercast(downcast.llval, bx.type_ptr()); downcast } @@ -514,8 +513,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> { downcast.layout = bx.cx().layout_of(ty); // Cast to the appropriate type. - let variant_ty = bx.cx().backend_type(downcast.layout); - downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty)); + downcast.llval = bx.pointercast(downcast.llval, bx.type_ptr()); downcast } @@ -595,10 +593,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // Cast the place pointer type to the new // array or slice type (`*[%_; new_len]`). - subslice.llval = bx.pointercast( - subslice.llval, - bx.cx().type_ptr_to(bx.cx().backend_type(subslice.layout)), - ); + subslice.llval = bx.pointercast(subslice.llval, bx.cx().type_ptr()); subslice } diff --git a/compiler/rustc_codegen_ssa/src/traits/type_.rs b/compiler/rustc_codegen_ssa/src/traits/type_.rs index 86481d5d758d6..cc3bd9f54a3e4 100644 --- a/compiler/rustc_codegen_ssa/src/traits/type_.rs +++ b/compiler/rustc_codegen_ssa/src/traits/type_.rs @@ -26,8 +26,8 @@ pub trait BaseTypeMethods<'tcx>: Backend<'tcx> { fn type_func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type; fn type_struct(&self, els: &[Self::Type], packed: bool) -> Self::Type; fn type_kind(&self, ty: Self::Type) -> TypeKind; - fn type_ptr_to(&self, ty: Self::Type) -> Self::Type; - fn type_ptr_to_ext(&self, ty: Self::Type, address_space: AddressSpace) -> Self::Type; + fn type_ptr(&self) -> Self::Type; + fn type_ptr_in(&self, address_space: AddressSpace) -> Self::Type; fn element_type(&self, ty: Self::Type) -> Self::Type; /// Returns the number of elements in `self` if it is a LLVM vector type. @@ -42,14 +42,6 @@ pub trait BaseTypeMethods<'tcx>: Backend<'tcx> { } pub trait DerivedTypeMethods<'tcx>: BaseTypeMethods<'tcx> + MiscMethods<'tcx> { - fn type_i8p(&self) -> Self::Type { - self.type_i8p_ext(AddressSpace::DATA) - } - - fn type_i8p_ext(&self, address_space: AddressSpace) -> Self::Type { - self.type_ptr_to_ext(self.type_i8(), address_space) - } - fn type_int(&self) -> Self::Type { match &self.sess().target.c_int_width[..] { "16" => self.type_i16(), diff --git a/src/test/ui/limits/issue-17913.stderr b/src/test/ui/limits/issue-17913.stderr index 9a6431d447004..69603f9e23ed6 100644 --- a/src/test/ui/limits/issue-17913.stderr +++ b/src/test/ui/limits/issue-17913.stderr @@ -1,4 +1,8 @@ error: values of the type `[&usize; N]` are too big for the current architecture + --> $SRC_DIR/alloc/src/boxed.rs:LL:COL + | +LL | pub fn new(x: T) -> Self { + | ^^^^^^^^^^^^^^^^^^^^^^^^ error: aborting due to previous error From 5d2b7ba637978a98624a246b632e3a00cbfc4fe0 Mon Sep 17 00:00:00 2001 From: Erik Desjardins Date: Tue, 6 Dec 2022 22:47:50 -0500 Subject: [PATCH 2/4] cg_gcc: adapt for removal of pointee types from cg_ssa i expected to need to add casts to load/store/memcpy/memmove/memset/etc. but they already pointercast their arguments, so with the (relatively minimal) number of pointer types created in cg_ssa, this should work? 
--- compiler/rustc_codegen_gcc/src/builder.rs | 1 - compiler/rustc_codegen_gcc/src/common.rs | 9 +++----- .../rustc_codegen_gcc/src/intrinsic/mod.rs | 2 +- compiler/rustc_codegen_gcc/src/type_.rs | 22 ++++++++++++++----- 4 files changed, 21 insertions(+), 13 deletions(-) diff --git a/compiler/rustc_codegen_gcc/src/builder.rs b/compiler/rustc_codegen_gcc/src/builder.rs index 782f6856654a6..8d33a89aec3f4 100644 --- a/compiler/rustc_codegen_gcc/src/builder.rs +++ b/compiler/rustc_codegen_gcc/src/builder.rs @@ -27,7 +27,6 @@ use rustc_codegen_ssa::traits::{ BaseTypeMethods, BuilderMethods, ConstMethods, - DerivedTypeMethods, LayoutTypeMethods, HasCodegen, OverflowOp, diff --git a/compiler/rustc_codegen_gcc/src/common.rs b/compiler/rustc_codegen_gcc/src/common.rs index aa1c271c31cb4..a2aba602d616b 100644 --- a/compiler/rustc_codegen_gcc/src/common.rs +++ b/compiler/rustc_codegen_gcc/src/common.rs @@ -4,7 +4,6 @@ use rustc_codegen_ssa::mir::place::PlaceRef; use rustc_codegen_ssa::traits::{ BaseTypeMethods, ConstMethods, - DerivedTypeMethods, MiscMethods, StaticMethods, }; @@ -132,7 +131,9 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> { .or_insert_with(|| (s.to_owned(), self.global_string(s))) .1; let len = s.len(); - let cs = self.const_ptrcast(str_global.get_address(None), + let cs = self.context.new_cast( + None, + str_global.get_address(None), self.type_ptr_to(self.layout_of(self.tcx.types.str_).gcc_type(self, true)), ); (cs, self.const_usize(len as u64)) @@ -243,10 +244,6 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> { }; PlaceRef::new_sized(value, layout) } - - fn const_ptrcast(&self, val: RValue<'gcc>, ty: Type<'gcc>) -> RValue<'gcc> { - self.context.new_cast(None, val, ty) - } } pub trait SignType<'gcc, 'tcx> { diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs index 49be6c649e652..3a7d1c659446b 100644 --- a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs +++ b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs @@ -7,7 +7,7 @@ use rustc_codegen_ssa::base::wants_msvc_seh; use rustc_codegen_ssa::common::IntPredicate; use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue}; use rustc_codegen_ssa::mir::place::PlaceRef; -use rustc_codegen_ssa::traits::{ArgAbiMethods, BaseTypeMethods, BuilderMethods, ConstMethods, IntrinsicCallMethods}; +use rustc_codegen_ssa::traits::{ArgAbiMethods, BuilderMethods, ConstMethods, IntrinsicCallMethods}; use rustc_middle::bug; use rustc_middle::ty::{self, Instance, Ty}; use rustc_middle::ty::layout::LayoutOf; diff --git a/compiler/rustc_codegen_gcc/src/type_.rs b/compiler/rustc_codegen_gcc/src/type_.rs index bdf7318ce48c9..85a843068a37f 100644 --- a/compiler/rustc_codegen_gcc/src/type_.rs +++ b/compiler/rustc_codegen_gcc/src/type_.rs @@ -56,6 +56,19 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { self.u128_type } + pub fn type_i8p(&self) -> Type<'gcc> { + self.type_ptr_to(self.type_i8()) + } + + pub fn type_ptr_to(&self, ty: Type<'gcc>) -> Type<'gcc> { + ty.make_pointer() + } + + pub fn type_ptr_to_ext(&self, ty: Type<'gcc>, _address_space: AddressSpace) -> Type<'gcc> { + // TODO(antoyo): use address_space, perhaps with TYPE_ADDR_SPACE? + ty.make_pointer() + } + pub fn type_pointee_for_align(&self, align: Align) -> Type<'gcc> { // FIXME(eddyb) We could find a better approximation if ity.align < align. 
let ity = Integer::approximate_align(self, align); @@ -151,13 +164,12 @@ impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> { } } - fn type_ptr_to(&self, ty: Type<'gcc>) -> Type<'gcc> { - ty.make_pointer() + fn type_ptr(&self) -> Type<'gcc> { + self.type_ptr_to(self.type_i8()) } - fn type_ptr_to_ext(&self, ty: Type<'gcc>, _address_space: AddressSpace) -> Type<'gcc> { - // TODO(antoyo): use address_space, perhaps with TYPE_ADDR_SPACE? - ty.make_pointer() + fn type_ptr_in(&self, address_space: AddressSpace) -> Type<'gcc> { + self.type_ptr_to_ext(self.type_i8(), address_space) } fn element_type(&self, ty: Type<'gcc>) -> Type<'gcc> { From 0fcc9beb41f606c72a42e30b864ad52da5bc3304 Mon Sep 17 00:00:00 2001 From: Erik Desjardins Date: Tue, 6 Dec 2022 00:22:34 -0500 Subject: [PATCH 3/4] cg_llvm: stop identifying ADTs when `fewer_names` (issue 96242) --- compiler/rustc_codegen_llvm/src/type_of.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs index 4248063312191..1cb83068d5805 100644 --- a/compiler/rustc_codegen_llvm/src/type_of.rs +++ b/compiler/rustc_codegen_llvm/src/type_of.rs @@ -63,9 +63,10 @@ fn uncached_llvm_type<'a, 'tcx>( } Some(name) } - // Use identified structure types for ADT. Due to pointee types in LLVM IR their definition + // In LLVM < 15, use identified structure types for ADT. Due to pointee types in LLVM IR their definition // might be recursive. Other cases are non-recursive and we can use literal structure types. - ty::Adt(..) => Some(String::new()), + // In LLVM 15, we use opaque pointers, so there are no pointee types and no potential recursion. + ty::Adt(..) if get_version() < (15, 0, 0) => Some(String::new()), _ => None, }; From 68cabc0c87fe4f89ab3a6434b5adc7d5ddf77b87 Mon Sep 17 00:00:00 2001 From: Erik Desjardins Date: Tue, 6 Dec 2022 01:14:14 -0500 Subject: [PATCH 4/4] cg_ssa, cg_llvm: remove unnecessary pointercast/bitcast-of-ptr --- compiler/rustc_codegen_llvm/src/abi.rs | 4 +-- compiler/rustc_codegen_llvm/src/back/write.rs | 3 +- compiler/rustc_codegen_llvm/src/base.rs | 3 +- compiler/rustc_codegen_llvm/src/builder.rs | 18 +--------- compiler/rustc_codegen_llvm/src/callee.rs | 36 +------------------ compiler/rustc_codegen_llvm/src/common.rs | 13 +++---- compiler/rustc_codegen_llvm/src/consts.rs | 10 ++---- compiler/rustc_codegen_llvm/src/context.rs | 13 ------- .../rustc_codegen_llvm/src/debuginfo/gdb.rs | 3 +- compiler/rustc_codegen_llvm/src/intrinsic.rs | 32 ++++------------- compiler/rustc_codegen_llvm/src/va_arg.rs | 12 ++----- compiler/rustc_codegen_ssa/src/base.rs | 32 +++-------------- compiler/rustc_codegen_ssa/src/meth.rs | 3 +- compiler/rustc_codegen_ssa/src/mir/block.rs | 9 ++--- .../rustc_codegen_ssa/src/mir/intrinsic.rs | 20 +++-------- compiler/rustc_codegen_ssa/src/mir/operand.rs | 3 +- compiler/rustc_codegen_ssa/src/mir/place.rs | 29 +++------------ compiler/rustc_codegen_ssa/src/mir/rvalue.rs | 19 +++------- .../rustc_codegen_ssa/src/traits/consts.rs | 2 -- 19 files changed, 46 insertions(+), 218 deletions(-) diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs index 15b2e919f25a1..eaa610dcb6c1a 100644 --- a/compiler/rustc_codegen_llvm/src/abi.rs +++ b/compiler/rustc_codegen_llvm/src/abi.rs @@ -226,9 +226,7 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> { // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}. 
let can_store_through_cast_ptr = false; if can_store_through_cast_ptr { - let cast_ptr_llty = bx.type_ptr(); - let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty); - bx.store(val, cast_dst, self.layout.align.abi); + bx.store(val, dst.llval, self.layout.align.abi); } else { // The actual return type is a struct, but the ABI // adaptation code has cast it into some scalar type. The diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs index b54273c864dbb..733ef463b7ed4 100644 --- a/compiler/rustc_codegen_llvm/src/back/write.rs +++ b/compiler/rustc_codegen_llvm/src/back/write.rs @@ -4,7 +4,6 @@ use crate::back::profiling::{ }; use crate::base; use crate::common; -use crate::consts; use crate::llvm::{self, DiagnosticInfo, PassManager}; use crate::llvm_util; use crate::type_::Type; @@ -948,7 +947,7 @@ fn create_msvc_imps( for (imp_name, val) in globals { let imp = llvm::LLVMAddGlobal(llmod, ptr_ty, imp_name.as_ptr().cast()); - llvm::LLVMSetInitializer(imp, consts::ptrcast(val, ptr_ty)); + llvm::LLVMSetInitializer(imp, val); llvm::LLVMRustSetLinkage(imp, llvm::Linkage::ExternalLinkage); } } diff --git a/compiler/rustc_codegen_llvm/src/base.rs b/compiler/rustc_codegen_llvm/src/base.rs index 5b2bbdb4bde1e..324bbfff026c0 100644 --- a/compiler/rustc_codegen_llvm/src/base.rs +++ b/compiler/rustc_codegen_llvm/src/base.rs @@ -123,8 +123,7 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol) -> (ModuleCodegen // happen after the llvm.used variables are created. for &(old_g, new_g) in cx.statics_to_rauw().borrow().iter() { unsafe { - let bitcast = llvm::LLVMConstPointerCast(new_g, cx.val_ty(old_g)); - llvm::LLVMReplaceAllUsesWith(old_g, bitcast); + llvm::LLVMReplaceAllUsesWith(old_g, new_g); llvm::LLVMDeleteGlobal(old_g); } } diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs index cb733af89c19c..084013f43e38e 100644 --- a/compiler/rustc_codegen_llvm/src/builder.rs +++ b/compiler/rustc_codegen_llvm/src/builder.rs @@ -854,8 +854,6 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memcpy not supported"); let size = self.intcast(size, self.type_isize(), false); let is_volatile = flags.contains(MemFlags::VOLATILE); - let dst = self.pointercast(dst, self.type_ptr()); - let src = self.pointercast(src, self.type_ptr()); unsafe { llvm::LLVMRustBuildMemCpy( self.llbuilder, @@ -881,8 +879,6 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memmove not supported"); let size = self.intcast(size, self.type_isize(), false); let is_volatile = flags.contains(MemFlags::VOLATILE); - let dst = self.pointercast(dst, self.type_ptr()); - let src = self.pointercast(src, self.type_ptr()); unsafe { llvm::LLVMRustBuildMemMove( self.llbuilder, @@ -905,7 +901,6 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { flags: MemFlags, ) { let is_volatile = flags.contains(MemFlags::VOLATILE); - let ptr = self.pointercast(ptr, self.type_ptr()); unsafe { llvm::LLVMRustBuildMemSet( self.llbuilder, @@ -1342,20 +1337,10 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { fn check_store(&mut self, ptr: &'ll Value) -> &'ll Value { let dest_ptr_ty = self.cx.val_ty(ptr); - let stored_ptr_ty = self.cx.type_ptr(); assert_eq!(self.cx.type_kind(dest_ptr_ty), TypeKind::Pointer); - if dest_ptr_ty == stored_ptr_ty { - ptr - } else { - 
debug!( - "type mismatch in store. \ - Expected {:?}, got {:?}; inserting bitcast", - dest_ptr_ty, stored_ptr_ty - ); - self.bitcast(ptr, stored_ptr_ty) - } + ptr } fn check_call<'b>( @@ -1420,7 +1405,6 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { return; } - let ptr = self.pointercast(ptr, self.cx.type_ptr()); self.call_intrinsic(intrinsic, &[self.cx.const_u64(size), ptr]); } diff --git a/compiler/rustc_codegen_llvm/src/callee.rs b/compiler/rustc_codegen_llvm/src/callee.rs index 70ff5c9617b7a..b88d7f0b511ac 100644 --- a/compiler/rustc_codegen_llvm/src/callee.rs +++ b/compiler/rustc_codegen_llvm/src/callee.rs @@ -4,13 +4,11 @@ //! and methods are represented as just a fn ptr and not a full //! closure. -use crate::abi::FnAbiLlvmExt; use crate::attributes; use crate::common; use crate::context::CodegenCx; use crate::llvm; use crate::value::Value; -use rustc_codegen_ssa::traits::*; use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt}; use rustc_middle::ty::{self, Instance, TypeVisitable}; @@ -45,39 +43,7 @@ pub fn get_fn<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>) -> let fn_abi = cx.fn_abi_of_instance(instance, ty::List::empty()); let llfn = if let Some(llfn) = cx.get_declared_value(sym) { - // Create a fn pointer with the new signature. - let llptrty = fn_abi.ptr_to_llvm_type(cx); - - // This is subtle and surprising, but sometimes we have to bitcast - // the resulting fn pointer. The reason has to do with external - // functions. If you have two crates that both bind the same C - // library, they may not use precisely the same types: for - // example, they will probably each declare their own structs, - // which are distinct types from LLVM's point of view (nominal - // types). - // - // Now, if those two crates are linked into an application, and - // they contain inlined code, you can wind up with a situation - // where both of those functions wind up being loaded into this - // application simultaneously. In that case, the same function - // (from LLVM's point of view) requires two types. But of course - // LLVM won't allow one function to have two types. - // - // What we currently do, therefore, is declare the function with - // one of the two types (whichever happens to come first) and then - // bitcast as needed when the function is referenced to make sure - // it has the type we expect. - // - // This can occur on either a crate-local or crate-external - // reference. It also occurs when testing libcore and in some - // other weird situations. Annoying. - if cx.val_ty(llfn) != llptrty { - debug!("get_fn: casting {:?} to {:?}", llfn, llptrty); - cx.const_ptrcast(llfn, llptrty) - } else { - debug!("get_fn: not casting pointer!"); - llfn - } + llfn } else { let instance_def_id = instance.def_id(); let llfn = if tcx.sess.target.arch == "x86" && diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs index ec054ca61b61a..42e8a31f46a05 100644 --- a/compiler/rustc_codegen_llvm/src/common.rs +++ b/compiler/rustc_codegen_llvm/src/common.rs @@ -1,6 +1,6 @@ //! Code that is useful in various codegen modules. 
-use crate::consts::{self, const_alloc_to_llvm}; +use crate::consts::const_alloc_to_llvm; pub use crate::context::CodegenCx; use crate::llvm::{self, BasicBlock, Bool, ConstantInt, False, OperandBundleDef, True}; use crate::type_::Type; @@ -202,8 +201,7 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { }) .1; let len = s.len(); - let cs = consts::ptrcast(str_global, self.type_ptr()); - (cs, self.const_usize(len as u64)) + (str_global, self.const_usize(len as u64)) } fn const_struct(&self, elts: &[&'ll Value], packed: bool) -> &'ll Value { @@ -312,19 +311,15 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { let llval = unsafe { llvm::LLVMRustConstInBoundsGEP2( self.type_i8(), - self.const_bitcast(base_addr, self.type_ptr()), + base_addr, &self.const_usize(offset.bytes()), 1, ) }; - self.const_bitcast(llval, llty) + llval }; PlaceRef::new_sized(llval, layout) } - - fn const_ptrcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value { - consts::ptrcast(val, ty) - } } /// Get the [LLVM type][Type] of a [`Value`]. diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs index 39ca675853687..7a9ef0518493d 100644 --- a/compiler/rustc_codegen_llvm/src/consts.rs +++ b/compiler/rustc_codegen_llvm/src/consts.rs @@ -209,10 +209,6 @@ fn check_and_apply_linkage<'ll, 'tcx>( } } -pub fn ptrcast<'ll>(val: &'ll Value, ty: &'ll Type) -> &'ll Value { - unsafe { llvm::LLVMConstPointerCast(val, ty) } -} - impl<'ll> CodegenCx<'ll, '_> { pub(crate) fn const_bitcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value { unsafe { llvm::LLVMConstBitCast(val, ty) } @@ -572,14 +568,12 @@ impl<'ll> StaticMethods for CodegenCx<'ll, '_> { /// Add a global value to a list to be stored in the `llvm.used` variable, an array of ptr. fn add_used_global(&self, global: &'ll Value) { - let cast = unsafe { llvm::LLVMConstPointerCast(global, self.type_ptr()) }; - self.used_statics.borrow_mut().push(cast); + self.used_statics.borrow_mut().push(global); } /// Add a global value to a list to be stored in the `llvm.compiler.used` variable, /// an array of ptr. fn add_compiler_used_global(&self, global: &'ll Value) { - let cast = unsafe { llvm::LLVMConstPointerCast(global, self.type_ptr()) }; - self.compiler_used_statics.borrow_mut().push(cast); + self.compiler_used_statics.borrow_mut().push(global); } } diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs index 16d1a495e6550..b9941a78bd5c1 100644 --- a/compiler/rustc_codegen_llvm/src/context.rs +++ b/compiler/rustc_codegen_llvm/src/context.rs @@ -59,17 +59,6 @@ pub struct CodegenCx<'ll, 'tcx> { /// Cache of constant strings, pub const_str_cache: RefCell<FxHashMap<String, &'ll Value>>, - /// Reverse-direction for const ptrs cast from globals. - /// - /// Key is a Value holding a `*T`, - /// Val is a Value holding a `*[T]`. - /// - /// Needed because LLVM loses pointer->pointee association - /// when we ptrcast, and we have to ptrcast during codegen - /// of a `[T]` const because we form a slice, a `(*T,usize)` pair, not - /// a pointer to an LLVM array type. Similar for trait objects.
- pub const_unsized: RefCell<FxHashMap<&'ll Value, &'ll Value>>, - /// Cache of emitted const globals (value -> global) pub const_globals: RefCell<FxHashMap<&'ll Value, &'ll Value>>, @@ -435,7 +424,6 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { instances: Default::default(), vtables: Default::default(), const_str_cache: Default::default(), - const_unsized: Default::default(), const_globals: Default::default(), statics_to_rauw: RefCell::new(Vec::new()), used_statics: RefCell::new(Vec::new()), @@ -903,7 +891,6 @@ impl<'ll> CodegenCx<'ll, '_> { self.declare_global("rust_eh_catch_typeinfo", ty) } }; - let eh_catch_typeinfo = self.const_bitcast(eh_catch_typeinfo, self.type_ptr()); self.eh_catch_typeinfo.set(Some(eh_catch_typeinfo)); eh_catch_typeinfo } diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs index d8a1987b1c9ba..cbfae9f33cbce 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs @@ -18,8 +18,7 @@ use rustc_span::DebuggerVisualizerType; /// .debug_gdb_scripts global is referenced, so it isn't removed by the linker. pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &mut Builder<'_, '_, '_>) { if needs_gdb_debug_scripts_section(bx) { - let gdb_debug_scripts_section = - bx.const_bitcast(get_or_insert_gdb_debug_scripts_section_global(bx), bx.type_ptr()); + let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx); // Load just the first byte as that's all that's necessary to force // LLVM to keep around the reference to the global. let volative_load_instruction = bx.volatile_load(bx.type_i8(), gdb_debug_scripts_section); diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs index c2de779687df2..6cd9dbab39ed9 100644 --- a/compiler/rustc_codegen_llvm/src/intrinsic.rs +++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs @@ -165,7 +165,6 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> { let ptr = args[0].immediate(); let load = if let PassMode::Cast(ty, _) = &fn_abi.ret.mode { let llty = ty.llvm_type(self); - let ptr = self.pointercast(ptr, self.type_ptr()); self.volatile_load(llty, ptr) } else { self.volatile_load(self.layout_of(tp_ty).llvm_type(self), ptr) @@ -319,18 +318,12 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> { self.const_bool(true) } else if use_integer_compare { let integer_ty = self.type_ix(layout.size().bits()); - let ptr_ty = self.type_ptr(); - let a_ptr = self.bitcast(a, ptr_ty); - let a_val = self.load(integer_ty, a_ptr, layout.align().abi); - let b_ptr = self.bitcast(b, ptr_ty); - let b_val = self.load(integer_ty, b_ptr, layout.align().abi); + let a_val = self.load(integer_ty, a, layout.align().abi); + let b_val = self.load(integer_ty, b, layout.align().abi); self.icmp(IntPredicate::IntEQ, a_val, b_val) } else { - let ptr_ty = self.type_ptr(); - let a_ptr = self.bitcast(a, ptr_ty); - let b_ptr = self.bitcast(b, ptr_ty); let n = self.const_usize(layout.size().bytes()); - let cmp = self.call_intrinsic("memcmp", &[a_ptr, b_ptr, n]); + let cmp = self.call_intrinsic("memcmp", &[a, b, n]); match self.cx.sess().target.arch.as_ref() { "avr" | "msp430" => self.icmp(IntPredicate::IntEQ, cmp, self.const_i16(0)), _ => self.icmp(IntPredicate::IntEQ, cmp, self.const_i32(0)), @@ -386,9 +379,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> { if !fn_abi.ret.is_ignore() { if let PassMode::Cast(_, _) = &fn_abi.ret.mode { - let ptr_llty = self.type_ptr(); - let ptr =
self.pointercast(result.llval, ptr_llty); - self.store(llval, ptr, result.align); + self.store(llval, result.llval, result.align); } else { OperandRef::from_immediate_or_packed_pair(self, llval, result.layout) .val @@ -412,9 +403,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> { fn type_test(&mut self, pointer: Self::Value, typeid: Self::Value) -> Self::Value { // Test the called operand using llvm.type.test intrinsic. The LowerTypeTests link-time // optimization pass replaces calls to this intrinsic with code to test type membership. - let ptr_ty = self.type_ptr(); - let bitcast = self.bitcast(pointer, ptr_ty); - self.call_intrinsic("llvm.type.test", &[bitcast, typeid]) + self.call_intrinsic("llvm.type.test", &[pointer, typeid]) } fn type_checked_load( @@ -748,7 +737,6 @@ fn codegen_emcc_try<'ll>( let catch_data_1 = bx.inbounds_gep(catch_data_type, catch_data, &[bx.const_usize(0), bx.const_usize(1)]); bx.store(is_rust_panic, catch_data_1, i8_align); - let catch_data = bx.bitcast(catch_data, bx.type_ptr()); let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void()); bx.call(catch_ty, None, catch_func, &[data, catch_data], None); @@ -897,8 +885,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>( let place = PlaceRef::alloca(bx, args[0].layout); args[0].val.store(bx, place); let int_ty = bx.type_ix(expected_bytes * 8); - let ptr = bx.pointercast(place.llval, bx.cx.type_ptr()); - bx.load(int_ty, ptr, Align::ONE) + bx.load(int_ty, place.llval, Align::ONE) } _ => return_error!( "invalid bitmask `{}`, expected `u{}` or `[u8; {}]`", @@ -1145,7 +1132,6 @@ fn generic_simd_intrinsic<'ll, 'tcx>( let ptr = bx.alloca(bx.type_ix(expected_bytes * 8), Align::ONE); bx.store(ze, ptr, Align::ONE); let array_ty = bx.type_array(bx.type_i8(), expected_bytes); - let ptr = bx.pointercast(ptr, bx.cx.type_ptr()); return Ok(bx.load(array_ty, ptr, Align::ONE)); } _ => return_error!( @@ -1763,11 +1749,7 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#, _ => return_error!("expected pointer, got `{}`", out_elem), } - if in_elem == out_elem { - return Ok(args[0].immediate()); - } else { - return Ok(bx.pointercast(args[0].immediate(), llret_ty)); - } + return Ok(args[0].immediate()); } if name == sym::simd_expose_addr { diff --git a/compiler/rustc_codegen_llvm/src/va_arg.rs b/compiler/rustc_codegen_llvm/src/va_arg.rs index c1c1786f6a803..a5f5bb9ad2060 100644 --- a/compiler/rustc_codegen_llvm/src/va_arg.rs +++ b/compiler/rustc_codegen_llvm/src/va_arg.rs @@ -32,12 +32,7 @@ fn emit_direct_ptr_va_arg<'ll, 'tcx>( allow_higher_align: bool, ) -> (&'ll Value, Align) { let va_list_ty = bx.type_ptr(); - let va_list_ptr_ty = bx.type_ptr(); - let va_list_addr = if list.layout.llvm_type(bx.cx) != va_list_ptr_ty { - bx.bitcast(list.immediate(), va_list_ptr_ty) - } else { - list.immediate() - }; + let va_list_addr = list.immediate(); let ptr = bx.load(va_list_ty, va_list_addr, bx.tcx().data_layout.pointer_align.abi); @@ -55,9 +50,9 @@ fn emit_direct_ptr_va_arg<'ll, 'tcx>( if size.bytes() < slot_size.bytes() && bx.tcx().sess.target.endian == Endian::Big { let adjusted_size = bx.cx().const_i32((slot_size.bytes() - size.bytes()) as i32); let adjusted = bx.inbounds_gep(bx.type_i8(), addr, &[adjusted_size]); - (bx.bitcast(adjusted, bx.type_ptr()), addr_align) + (adjusted, addr_align) } else { - (bx.bitcast(addr, bx.type_ptr()), addr_align) + (addr, addr_align) } } @@ -157,7 +152,6 @@ fn emit_aapcs_va_arg<'ll, 'tcx>( reg_addr = bx.gep(bx.type_i8(), reg_addr, &[offset]); } let reg_type = 
layout.llvm_type(bx); - let reg_addr = bx.bitcast(reg_addr, bx.type_ptr()); let reg_value = bx.load(reg_type, reg_addr, layout.align.abi); bx.br(end); diff --git a/compiler/rustc_codegen_ssa/src/base.rs b/compiler/rustc_codegen_ssa/src/base.rs index d7084fa714f5c..16e435ea50ea5 100644 --- a/compiler/rustc_codegen_ssa/src/base.rs +++ b/compiler/rustc_codegen_ssa/src/base.rs @@ -171,48 +171,25 @@ pub fn unsized_info<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( if let Some(entry_idx) = vptr_entry_idx { let ptr_ty = cx.type_ptr(); let ptr_align = cx.tcx().data_layout.pointer_align.abi; - let vtable_ptr_ty = vtable_ptr_ty(cx, target, target_dyn_kind); - let llvtable = bx.pointercast(old_info, bx.type_ptr()); let gep = bx.inbounds_gep( ptr_ty, - llvtable, + old_info, &[bx.const_usize(u64::try_from(entry_idx).unwrap())], ); let new_vptr = bx.load(ptr_ty, gep, ptr_align); bx.nonnull_metadata(new_vptr); // VTable loads are invariant. bx.set_invariant_load(new_vptr); - bx.pointercast(new_vptr, vtable_ptr_ty) + new_vptr } else { old_info } } - (_, &ty::Dynamic(ref data, _, target_dyn_kind)) => { - let vtable_ptr_ty = vtable_ptr_ty(cx, target, target_dyn_kind); - cx.const_ptrcast(meth::get_vtable(cx, source, data.principal()), vtable_ptr_ty) - } + (_, &ty::Dynamic(ref data, _, _)) => meth::get_vtable(cx, source, data.principal()), _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", source, target), } } -// Returns the vtable pointer type of a `dyn` or `dyn*` type -fn vtable_ptr_ty<'tcx, Cx: CodegenMethods<'tcx>>( - cx: &Cx, - target: Ty<'tcx>, - kind: ty::DynKind, -) -> <Cx as BackendTypes>::Type { - cx.scalar_pair_element_backend_type( - cx.layout_of(match kind { - // vtable is the second field of `*mut dyn Trait` - ty::Dyn => cx.tcx().mk_mut_ptr(target), - // vtable is the second field of `dyn* Trait` - ty::DynStar => target, -}), - 1, - true, - ) -} - /// Coerces `src` to `dst_ty`. `src_ty` must be a pointer. pub fn unsize_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( bx: &mut Bx, @@ -226,8 +203,7 @@ pub fn unsize_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( (&ty::Ref(_, a, _), &ty::Ref(_, b, _) | &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) | (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }), &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => { assert_eq!(bx.cx().type_is_sized(a), old_info.is_none()); - let ptr_ty = bx.type_ptr(); - (bx.pointercast(src, ptr_ty), unsized_info(bx, a, b, old_info)) + (src, unsized_info(bx, a, b, old_info)) } (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => { assert_eq!(def_a, def_b); diff --git a/compiler/rustc_codegen_ssa/src/meth.rs b/compiler/rustc_codegen_ssa/src/meth.rs index 24fede6c396e3..06523040b812d 100644 --- a/compiler/rustc_codegen_ssa/src/meth.rs +++ b/compiler/rustc_codegen_ssa/src/meth.rs @@ -23,7 +23,6 @@ impl<'a, 'tcx> VirtualIndex { // Load the data pointer from the object.
debug!("get_fn({llvtable:?}, {ty:?}, {self:?})"); let llty = bx.fn_ptr_backend_type(fn_abi); - let llvtable = bx.pointercast(llvtable, bx.type_ptr()); if bx.cx().sess().opts.unstable_opts.virtual_function_elimination && bx.cx().sess().lto() == Lto::Fat @@ -33,6 +32,7 @@ impl<'a, 'tcx> VirtualIndex { let vtable_byte_offset = self.0 * bx.data_layout().pointer_size.bytes(); let type_checked_load = bx.type_checked_load(llvtable, vtable_byte_offset, typeid); let func = bx.extract_value(type_checked_load, 0); + // Cast extracted ptr into instruction address space (if necessary) bx.pointercast(func, llty) } else { let ptr_align = bx.tcx().data_layout.pointer_align.abi; @@ -54,7 +54,6 @@ impl<'a, 'tcx> VirtualIndex { debug!("get_int({:?}, {:?})", llvtable, self); let llty = bx.type_isize(); - let llvtable = bx.pointercast(llvtable, bx.type_ptr()); let usize_align = bx.tcx().data_layout.pointer_align.abi; let gep = bx.inbounds_gep(llty, llvtable, &[bx.const_usize(self.0)]); let ptr = bx.load(llty, gep, usize_align); diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs index c0cfdccbe917e..4c39959b8d469 100644 --- a/compiler/rustc_codegen_ssa/src/mir/block.rs +++ b/compiler/rustc_codegen_ssa/src/mir/block.rs @@ -426,8 +426,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } }; let ty = bx.cast_backend_type(cast_ty); - let addr = bx.pointercast(llslot, bx.type_ptr()); - bx.load(ty, addr, self.fn_abi.ret.layout.align.abi) + bx.load(ty, llslot, self.fn_abi.ret.layout.align.abi) } }; bx.ret(llval); @@ -1446,8 +1445,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // Have to load the argument, maybe while casting it. if let PassMode::Cast(ty, _) = &arg.mode { let llty = bx.cast_backend_type(ty); - let addr = bx.pointercast(llval, bx.type_ptr()); - llval = bx.load(llty, addr, align.min(arg.layout.align.abi)); + llval = bx.load(llty, llval, align.min(arg.layout.align.abi)); } else { // We can't use `PlaceRef::load` here because the argument // may have a type we don't treat as immediate, but the ABI @@ -1826,9 +1824,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { _ => {} } - let cast_ptr = bx.pointercast(dst.llval, bx.type_ptr()); let align = src.layout.align.abi.min(dst.align); - src.val.store(bx, PlaceRef::new_sized_aligned(cast_ptr, src.layout, align)); + src.val.store(bx, PlaceRef::new_sized_aligned(dst.llval, src.layout, align)); } // Stores the return value of a function call into it's final location. diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs index dc683b510ee5a..1b437449aafc9 100644 --- a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs +++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs @@ -429,14 +429,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let ty = substs.type_at(0); if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() { let weak = instruction == "cxchgweak"; - let mut dst = args[0].immediate(); + let dst = args[0].immediate(); let mut cmp = args[1].immediate(); let mut src = args[2].immediate(); if ty.is_unsafe_ptr() { // Some platforms do not support atomic operations on pointers, // so we cast to integer first. 
- let ptr_llty = bx.type_ptr(); - dst = bx.pointercast(dst, ptr_llty); cmp = bx.ptrtoint(cmp, bx.type_isize()); src = bx.ptrtoint(src, bx.type_isize()); } @@ -461,13 +459,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() { let layout = bx.layout_of(ty); let size = layout.size; - let mut source = args[0].immediate(); + let source = args[0].immediate(); if ty.is_unsafe_ptr() { // Some platforms do not support atomic operations on pointers, // so we cast to integer first... let llty = bx.type_isize(); - let ptr_llty = bx.type_ptr(); - source = bx.pointercast(source, ptr_llty); let result = bx.atomic_load(llty, source, parse_ordering(bx, ordering), size); // ... and then cast the result back to a pointer bx.inttoptr(result, bx.backend_type(layout)) @@ -484,12 +480,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() { let size = bx.layout_of(ty).size; let mut val = args[1].immediate(); - let mut ptr = args[0].immediate(); + let ptr = args[0].immediate(); if ty.is_unsafe_ptr() { // Some platforms do not support atomic operations on pointers, // so we cast to integer first. - let ptr_llty = bx.type_ptr(); - ptr = bx.pointercast(ptr, ptr_llty); val = bx.ptrtoint(val, bx.type_isize()); } bx.atomic_store(val, ptr, parse_ordering(bx, ordering), size); @@ -528,13 +522,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let ty = substs.type_at(0); if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() { - let mut ptr = args[0].immediate(); + let ptr = args[0].immediate(); let mut val = args[1].immediate(); if ty.is_unsafe_ptr() { // Some platforms do not support atomic operations on pointers, // so we cast to integer first. - let ptr_llty = bx.type_ptr(); - ptr = bx.pointercast(ptr, ptr_llty); val = bx.ptrtoint(val, bx.type_isize()); } bx.atomic_rmw(atom_op, ptr, val, parse_ordering(bx, ordering)) @@ -590,9 +582,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { if !fn_abi.ret.is_ignore() { if let PassMode::Cast(_, _) = &fn_abi.ret.mode { - let ptr_llty = bx.type_ptr(); - let ptr = bx.pointercast(result.llval, ptr_llty); - bx.store(llval, ptr, result.align); + bx.store(llval, result.llval, result.align); } else { OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout) .val diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs index 763ccea84a2fc..d7c0152f85ae1 100644 --- a/compiler/rustc_codegen_ssa/src/mir/operand.rs +++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs @@ -292,8 +292,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue { if flags.contains(MemFlags::NONTEMPORAL) { // HACK(nox): This is inefficient but there is no nontemporal memcpy. let ty = bx.backend_type(dest.layout); - let ptr = bx.pointercast(r, bx.type_ptr()); - let val = bx.load(ty, ptr, source_align); + let val = bx.load(ty, r, source_align); bx.store_with_flags(val, dest.llval, dest.align, flags); return; } diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs index b08ea5998a1d9..59b1eec7285e7 100644 --- a/compiler/rustc_codegen_ssa/src/mir/place.rs +++ b/compiler/rustc_codegen_ssa/src/mir/place.rs @@ -107,8 +107,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> { } Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. 
} if field.is_zst() => { // ZST fields are not included in Scalar, ScalarPair, and Vector layouts, so manually offset the pointer. - let byte_ptr = bx.pointercast(self.llval, bx.type_ptr()); - bx.gep(bx.cx().type_i8(), byte_ptr, &[bx.const_usize(offset.bytes())]) + bx.gep(bx.cx().type_i8(), self.llval, &[bx.const_usize(offset.bytes())]) } Abi::Scalar(_) | Abi::ScalarPair(..) => { // All fields of Scalar and ScalarPair layouts must have been handled by this point. @@ -125,8 +124,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> { } }; PlaceRef { - // HACK(eddyb): have to bitcast pointers until LLVM removes pointee types. - llval: bx.pointercast(llval, bx.type_ptr()), + llval, llextra: if bx.cx().type_has_metadata(field.ty) { self.llextra } else { None }, layout: field, align: effective_field_align, @@ -186,16 +184,11 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> { debug!("struct_field_ptr: DST field offset: {:?}", offset); - // Cast and adjust pointer. - let byte_ptr = bx.pointercast(self.llval, bx.type_ptr()); - let byte_ptr = bx.gep(bx.cx().type_i8(), byte_ptr, &[offset]); - - // Finally, cast back to the type expected. - let ll_fty = bx.cx().backend_type(field); - debug!("struct_field_ptr: Field type is {:?}", ll_fty); + // Adjust pointer. + let byte_ptr = bx.gep(bx.cx().type_i8(), self.llval, &[offset]); PlaceRef { - llval: bx.pointercast(byte_ptr, bx.type_ptr()), + llval: byte_ptr, llextra: self.llextra, layout: field, align: effective_field_align, @@ -497,10 +490,6 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> { ) -> Self { let mut downcast = *self; downcast.layout = self.layout.for_variant(bx.cx(), variant_index); - - // Cast to the appropriate variant struct type. - downcast.llval = bx.pointercast(downcast.llval, bx.type_ptr()); - downcast } @@ -511,10 +500,6 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> { ) -> Self { let mut downcast = *self; downcast.layout = bx.cx().layout_of(ty); - - // Cast to the appropriate type. - downcast.llval = bx.pointercast(downcast.llval, bx.type_ptr()); - downcast } @@ -591,10 +576,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { )); } - // Cast the place pointer type to the new - // array or slice type (`*[%_; new_len]`). - subslice.llval = bx.pointercast(subslice.llval, bx.cx().type_ptr()); - subslice } mir::ProjectionElem::Downcast(_, v) => cg_base.project_downcast(bx, v), diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs index 9ad96f7a44742..49005c2ff4395 100644 --- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs +++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs @@ -249,18 +249,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { { if let OperandValue::Pair(data_ptr, meta) = operand.val { if bx.cx().is_backend_scalar_pair(cast) { - let data_cast = bx.pointercast( - data_ptr, - bx.cx().scalar_pair_element_backend_type(cast, 0, true), - ); - OperandValue::Pair(data_cast, meta) + OperandValue::Pair(data_ptr, meta) } else { // cast to thin-ptr - // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and - // pointer-cast of that pointer to desired pointer type. - let llcast_ty = bx.cx().immediate_backend_type(cast); - let llval = bx.pointercast(data_ptr, llcast_ty); - OperandValue::Immediate(llval) + // Cast of fat-ptr to thin-ptr is an extraction of data-ptr. 
+ OperandValue::Immediate(data_ptr) } } else { bug!("unexpected non-pair operand"); @@ -325,7 +318,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } } (CastTy::Ptr(_) | CastTy::FnPtr, CastTy::Ptr(_)) => { - bx.pointercast(llval, ll_t_out) + llval } (CastTy::Int(i), CastTy::Ptr(_)) => { let usize_llval = @@ -477,10 +470,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let content_ty = self.monomorphize(content_ty); let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty)); - let llty_ptr = bx.cx().backend_type(box_layout); - let val = bx.pointercast(lloperand, llty_ptr); - OperandRef { val: OperandValue::Immediate(val), layout: box_layout } + OperandRef { val: OperandValue::Immediate(lloperand), layout: box_layout } } } } diff --git a/compiler/rustc_codegen_ssa/src/traits/consts.rs b/compiler/rustc_codegen_ssa/src/traits/consts.rs index fdc7a30e841ed..c20c42583d3c9 100644 --- a/compiler/rustc_codegen_ssa/src/traits/consts.rs +++ b/compiler/rustc_codegen_ssa/src/traits/consts.rs @@ -35,6 +35,4 @@ pub trait ConstMethods<'tcx>: BackendTypes { alloc: ConstAllocation<'tcx>, offset: Size, ) -> PlaceRef<'tcx, Self::Value>; - - fn const_ptrcast(&self, val: Self::Value, ty: Self::Type) -> Self::Value; }
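
A minimal standalone sketch of the pointer-type API these patches converge on. It is illustrative only and not part of the series: `BaseTypeMethods`, `AddressSpace`, `type_ptr`, and `type_ptr_in` are modeled on names that appear in the hunks above, but the definitions here are simplified stand-ins rather than the real rustc traits. The point is that with opaque pointers a pointer type is identified only by its address space, so callers never name a pointee and pointer-to-pointer casts have nothing left to do.

/// An LLVM-style address space; 0 is the default on most targets.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct AddressSpace(pub u32);

/// Simplified stand-in for the backend type interface: pointer types carry
/// no pointee, only an address space.
pub trait BaseTypeMethods {
    type Type;

    /// Opaque pointer in the default address space
    /// (takes over from the removed `type_ptr_to(pointee)`).
    fn type_ptr(&self) -> Self::Type;

    /// Opaque pointer in an explicit address space
    /// (takes over from the removed `type_ptr_to_ext(pointee, addr_space)`).
    fn type_ptr_in(&self, address_space: AddressSpace) -> Self::Type;
}

/// Toy context that renders pointer types as strings, just to show the shape.
struct DemoCx;

impl BaseTypeMethods for DemoCx {
    type Type = String;

    fn type_ptr(&self) -> String {
        self.type_ptr_in(AddressSpace(0))
    }

    fn type_ptr_in(&self, address_space: AddressSpace) -> String {
        // Every pointer in a given address space has this one type, which is
        // why the call sites above can drop their pointercast/bitcast steps.
        format!("ptr addrspace({})", address_space.0)
    }
}

fn main() {
    let cx = DemoCx;
    assert_eq!(cx.type_ptr(), "ptr addrspace(0)");
    assert_eq!(cx.type_ptr_in(AddressSpace(1)), "ptr addrspace(1)");
}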