diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index b472d05464637..57b5701edf54b 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -161,58 +161,63 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
     fn set_span(&mut self, _span: Span) {}
 
-    fn ret_void(&mut self) {
+    fn ret_void(self) -> Self::Unpositioned {
         unsafe {
             llvm::LLVMBuildRetVoid(self.llbuilder);
         }
+        self.into_unpositioned()
     }
 
-    fn ret(&mut self, v: &'ll Value) {
+    fn ret(self, v: &'ll Value) -> Self::Unpositioned {
         unsafe {
             llvm::LLVMBuildRet(self.llbuilder, v);
         }
+        self.into_unpositioned()
     }
 
-    fn br(&mut self, dest: &'ll BasicBlock) {
+    fn br(self, dest: &'ll BasicBlock) -> Self::Unpositioned {
         unsafe {
             llvm::LLVMBuildBr(self.llbuilder, dest);
         }
+        self.into_unpositioned()
    }
 
     fn cond_br(
-        &mut self,
+        self,
         cond: &'ll Value,
         then_llbb: &'ll BasicBlock,
         else_llbb: &'ll BasicBlock,
-    ) {
+    ) -> Self::Unpositioned {
         unsafe {
             llvm::LLVMBuildCondBr(self.llbuilder, cond, then_llbb, else_llbb);
         }
+        self.into_unpositioned()
     }
 
     fn switch(
-        &mut self,
+        self,
         v: &'ll Value,
         else_llbb: &'ll BasicBlock,
         cases: impl ExactSizeIterator<Item = (u128, &'ll BasicBlock)>,
-    ) {
+    ) -> Self::Unpositioned {
         let switch =
             unsafe { llvm::LLVMBuildSwitch(self.llbuilder, v, else_llbb, cases.len() as c_uint) };
         for (on_val, dest) in cases {
             let on_val = self.const_uint_big(self.val_ty(v), on_val);
             unsafe { llvm::LLVMAddCase(switch, on_val, dest) }
         }
+        self.into_unpositioned()
     }
 
     fn invoke(
-        &mut self,
+        mut self,
         llfn: &'ll Value,
         args: &[&'ll Value],
         then: &'ll BasicBlock,
         catch: &'ll BasicBlock,
         funclet: Option<&Funclet<'ll>>,
         fn_abi_for_attrs: Option<&FnAbi<'tcx, Ty<'tcx>>>,
-    ) -> &'ll Value {
+    ) -> (Self::Unpositioned, &'ll Value) {
         debug!("invoke {:?} with args ({:?})", llfn, args);
 
         let args = self.check_call("invoke", llfn, args);
@@ -232,15 +237,16 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             )
         };
         if let Some(fn_abi) = fn_abi_for_attrs {
-            fn_abi.apply_attrs_callsite(self, invoke);
+            fn_abi.apply_attrs_callsite(&mut self, invoke);
         }
-        invoke
+        (self.into_unpositioned(), invoke)
     }
 
-    fn unreachable(&mut self) {
+    fn unreachable(self) -> Self::Unpositioned {
         unsafe {
             llvm::LLVMBuildUnreachable(self.llbuilder);
         }
+        self.into_unpositioned()
     }
 
     builder_methods_for_value_instructions! {
@@ -530,29 +536,33 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         count: u64,
         dest: PlaceRef<'tcx, &'ll Value>,
     ) -> Self {
-        let zero = self.const_usize(0);
-        let count = self.const_usize(count);
-        let start = dest.project_index(&mut self, zero).llval;
-        let end = dest.project_index(&mut self, count).llval;
+        let cx = self.cx;
+        let original_llbb = self.llbb();
+
+        let start = dest.project_index(&mut self, cx.const_usize(0)).llval;
+        let end = dest.project_index(&mut self, cx.const_usize(count)).llval;
 
         let mut header_bx = self.build_sibling_block("repeat_loop_header");
+        let header_llbb = header_bx.llbb();
         let mut body_bx = self.build_sibling_block("repeat_loop_body");
+        let body_llbb = body_bx.llbb();
         let next_bx = self.build_sibling_block("repeat_loop_next");
+
+        let current_llty = cx.val_ty(start);
         self.br(header_bx.llbb());
 
-        let current = header_bx.phi(self.val_ty(start), &[start], &[self.llbb()]);
+        let current = header_bx.phi(current_llty, &[start], &[original_llbb]);
         let keep_going = header_bx.icmp(IntPredicate::IntNE, current, end);
         header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb());
 
-        let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
+        let align = dest.align.restrict_for_offset(dest.layout.field(cx, 0).size);
         cg_elem
             .val
             .store(&mut body_bx, PlaceRef::new_sized_aligned(current, cg_elem.layout, align));
 
-        let next = body_bx.inbounds_gep(current, &[self.const_usize(1)]);
-        body_bx.br(header_bx.llbb());
-        Self::add_incoming_to_phi(current, &[next], &[body_bx.llbb()]);
+        let next = body_bx.inbounds_gep(current, &[cx.const_usize(1)]);
+        body_bx.br(header_llbb);
+        Self::add_incoming_to_phi(current, &[next], &[body_llbb]);
         next_bx
     }
 
@@ -958,8 +968,9 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         }
     }
 
-    fn resume(&mut self, exn: &'ll Value) -> &'ll Value {
-        unsafe { llvm::LLVMBuildResume(self.llbuilder, exn) }
+    fn resume(self, exn: &'ll Value) -> (Self::Unpositioned, &'ll Value) {
+        let resume = unsafe { llvm::LLVMBuildResume(self.llbuilder, exn) };
+        (self.into_unpositioned(), resume)
     }
 
     fn cleanup_pad(&mut self, parent: Option<&'ll Value>, args: &[&'ll Value]) -> Funclet<'ll> {
@@ -977,13 +988,13 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
     }
 
     fn cleanup_ret(
-        &mut self,
+        self,
         funclet: &Funclet<'ll>,
         unwind: Option<&'ll BasicBlock>,
-    ) -> &'ll Value {
+    ) -> (Self::Unpositioned, &'ll Value) {
         let ret =
             unsafe { llvm::LLVMRustBuildCleanupRet(self.llbuilder, funclet.cleanuppad(), unwind) };
-        ret.expect("LLVM does not have support for cleanupret")
+        (self.into_unpositioned(), ret.expect("LLVM does not have support for cleanupret"))
     }
 
     fn catch_pad(&mut self, parent: &'ll Value, args: &[&'ll Value]) -> Funclet<'ll> {
@@ -1001,11 +1012,11 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
     }
 
     fn catch_switch(
-        &mut self,
+        self,
         parent: Option<&'ll Value>,
         unwind: Option<&'ll BasicBlock>,
         handlers: &[&'ll BasicBlock],
-    ) -> &'ll Value {
+    ) -> (Self::Unpositioned, &'ll Value) {
         let name = cstr!("catchswitch");
         let ret = unsafe {
             llvm::LLVMRustBuildCatchSwitch(
@@ -1022,7 +1033,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                 llvm::LLVMRustAddHandler(catch_switch, handler);
             }
         }
-        catch_switch
+        (self.into_unpositioned(), catch_switch)
     }
 
     fn set_personality_fn(&mut self, personality: &'ll Value) {
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index 85ea04134a043..20e90e3051510 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -405,13 +405,15 @@ fn codegen_msvc_try(
     dest: &'ll Value,
 ) {
     let llfn = get_rust_try_fn(bx, &mut |mut bx| {
+        let cx = bx.cx;
+
         bx.set_personality_fn(bx.eh_personality());
 
-        let mut normal = bx.build_sibling_block("normal");
-        let mut catchswitch = bx.build_sibling_block("catchswitch");
+        let normal = bx.build_sibling_block("normal");
+        let catchswitch = bx.build_sibling_block("catchswitch");
         let mut catchpad_rust = bx.build_sibling_block("catchpad_rust");
         let mut catchpad_foreign = bx.build_sibling_block("catchpad_foreign");
-        let mut caught = bx.build_sibling_block("caught");
+        let caught = bx.build_sibling_block("caught");
 
         let try_func = llvm::get_param(bx.llfn(), 0);
         let data = llvm::get_param(bx.llfn(), 1);
@@ -476,9 +478,9 @@ fn codegen_msvc_try(
         let slot = bx.alloca(bx.type_i8p(), ptr_align);
         bx.invoke(try_func, &[data], normal.llbb(), catchswitch.llbb(), None, None);
 
-        normal.ret(bx.const_i32(0));
+        normal.ret(cx.const_i32(0));
 
-        let cs =
+        let (_, cs) =
             catchswitch.catch_switch(None, None, &[catchpad_rust.llbb(), catchpad_foreign.llbb()]);
 
         // We can't use the TypeDescriptor defined in libpanic_unwind because it
@@ -495,14 +497,14 @@ fn codegen_msvc_try(
         //
         // When modifying, make sure that the type_name string exactly matches
         // the one used in src/libpanic_unwind/seh.rs.
-        let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_i8p());
-        let type_name = bx.const_bytes(b"rust_panic\0");
+        let type_info_vtable = cx.declare_global("??_7type_info@@6B@", cx.type_i8p());
+        let type_name = cx.const_bytes(b"rust_panic\0");
         let type_info =
-            bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_i8p()), type_name], false);
-        let tydesc = bx.declare_global("__rust_panic_type_info", bx.val_ty(type_info));
+            cx.const_struct(&[type_info_vtable, cx.const_null(cx.type_i8p()), type_name], false);
+        let tydesc = cx.declare_global("__rust_panic_type_info", cx.val_ty(type_info));
         unsafe {
             llvm::LLVMRustSetLinkage(tydesc, llvm::Linkage::LinkOnceODRLinkage);
-            llvm::SetUniqueComdat(bx.llmod, tydesc);
+            llvm::SetUniqueComdat(cx.llmod, tydesc);
             llvm::LLVMSetInitializer(tydesc, type_info);
         }
 
@@ -512,20 +514,20 @@ fn codegen_msvc_try(
         // since our exception object effectively contains a Box.
         //
         // Source: MicrosoftCXXABI::getAddrOfCXXCatchHandlerType in clang
-        let flags = bx.const_i32(8);
+        let flags = cx.const_i32(8);
         let funclet = catchpad_rust.catch_pad(cs, &[tydesc, flags, slot]);
 
         let ptr = catchpad_rust.load(slot, ptr_align);
         catchpad_rust.call(catch_func, &[data, ptr], Some(&funclet), None);
         catchpad_rust.catch_ret(&funclet, caught.llbb());
 
         // The flag value of 64 indicates a "catch-all".
-        let flags = bx.const_i32(64);
-        let null = bx.const_null(bx.type_i8p());
+        let flags = cx.const_i32(64);
+        let null = cx.const_null(cx.type_i8p());
         let funclet = catchpad_foreign.catch_pad(cs, &[null, flags, null]);
         catchpad_foreign.call(catch_func, &[data, null], Some(&funclet), None);
         catchpad_foreign.catch_ret(&funclet, caught.llbb());
 
-        caught.ret(bx.const_i32(1));
+        caught.ret(cx.const_i32(1));
     });
 
     // Note that no invoke is used here because by definition this function
@@ -553,7 +555,9 @@ fn codegen_gnu_try(
     catch_func: &'ll Value,
     dest: &'ll Value,
 ) {
-    let llfn = get_rust_try_fn(bx, &mut |mut bx| {
+    let llfn = get_rust_try_fn(bx, &mut |bx| {
+        let cx = bx.cx;
+
         // Codegens the shims described above:
         //
         //   bx:
@@ -566,14 +570,14 @@ fn codegen_gnu_try(
         //      (%ptr, _) = landingpad
         //      call %catch_func(%data, %ptr)
         //      ret 1
-        let mut then = bx.build_sibling_block("then");
+        let then = bx.build_sibling_block("then");
         let mut catch = bx.build_sibling_block("catch");
 
         let try_func = llvm::get_param(bx.llfn(), 0);
         let data = llvm::get_param(bx.llfn(), 1);
         let catch_func = llvm::get_param(bx.llfn(), 2);
         bx.invoke(try_func, &[data], then.llbb(), catch.llbb(), None, None);
-        then.ret(bx.const_i32(0));
+        then.ret(cx.const_i32(0));
 
         // Type indicator for the exception being thrown.
         //
@@ -581,13 +585,13 @@ fn codegen_gnu_try(
         // being thrown. The second value is a "selector" indicating which of
         // the landing pad clauses the exception's type had been matched to.
         // rust_try ignores the selector.
-        let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false);
-        let vals = catch.landing_pad(lpad_ty, bx.eh_personality(), 1);
-        let tydesc = bx.const_null(bx.type_i8p());
+        let lpad_ty = cx.type_struct(&[cx.type_i8p(), cx.type_i32()], false);
+        let vals = catch.landing_pad(lpad_ty, cx.eh_personality(), 1);
+        let tydesc = cx.const_null(cx.type_i8p());
         catch.add_clause(vals, tydesc);
         let ptr = catch.extract_value(vals, 0);
         catch.call(catch_func, &[data, ptr], None, None);
-        catch.ret(bx.const_i32(1));
+        catch.ret(cx.const_i32(1));
     });
 
     // Note that no invoke is used here because by definition this function
@@ -607,7 +611,9 @@ fn codegen_emcc_try(
     catch_func: &'ll Value,
     dest: &'ll Value,
 ) {
-    let llfn = get_rust_try_fn(bx, &mut |mut bx| {
+    let llfn = get_rust_try_fn(bx, &mut |bx| {
+        let cx = bx.cx;
+
         // Codegens the shims described above:
         //
         //   bx:
@@ -625,48 +631,48 @@ fn codegen_emcc_try(
         //      %catch_data[1] = %is_rust_panic
         //      call %catch_func(%data, %catch_data)
         //      ret 1
-        let mut then = bx.build_sibling_block("then");
+        let then = bx.build_sibling_block("then");
         let mut catch = bx.build_sibling_block("catch");
 
         let try_func = llvm::get_param(bx.llfn(), 0);
         let data = llvm::get_param(bx.llfn(), 1);
         let catch_func = llvm::get_param(bx.llfn(), 2);
         bx.invoke(try_func, &[data], then.llbb(), catch.llbb(), None, None);
-        then.ret(bx.const_i32(0));
+        then.ret(cx.const_i32(0));
 
         // Type indicator for the exception being thrown.
         //
         // The first value in this tuple is a pointer to the exception object
         // being thrown. The second value is a "selector" indicating which of
         // the landing pad clauses the exception's type had been matched to.
-        let tydesc = bx.eh_catch_typeinfo();
-        let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false);
-        let vals = catch.landing_pad(lpad_ty, bx.eh_personality(), 2);
+        let tydesc = cx.eh_catch_typeinfo();
+        let lpad_ty = cx.type_struct(&[cx.type_i8p(), cx.type_i32()], false);
+        let vals = catch.landing_pad(lpad_ty, cx.eh_personality(), 2);
         catch.add_clause(vals, tydesc);
-        catch.add_clause(vals, bx.const_null(bx.type_i8p()));
+        catch.add_clause(vals, cx.const_null(cx.type_i8p()));
         let ptr = catch.extract_value(vals, 0);
         let selector = catch.extract_value(vals, 1);
 
         // Check if the typeid we got is the one for a Rust panic.
-        let llvm_eh_typeid_for = bx.get_intrinsic("llvm.eh.typeid.for");
+        let llvm_eh_typeid_for = cx.get_intrinsic("llvm.eh.typeid.for");
         let rust_typeid = catch.call(llvm_eh_typeid_for, &[tydesc], None, None);
         let is_rust_panic = catch.icmp(IntPredicate::IntEQ, selector, rust_typeid);
-        let is_rust_panic = catch.zext(is_rust_panic, bx.type_bool());
+        let is_rust_panic = catch.zext(is_rust_panic, cx.type_bool());
 
         // We need to pass two values to catch_func (ptr and is_rust_panic), so
         // create an alloca and pass a pointer to that.
-        let ptr_align = bx.tcx().data_layout.pointer_align.abi;
-        let i8_align = bx.tcx().data_layout.i8_align.abi;
+        let ptr_align = cx.tcx.data_layout.pointer_align.abi;
+        let i8_align = cx.tcx.data_layout.i8_align.abi;
         let catch_data =
-            catch.alloca(bx.type_struct(&[bx.type_i8p(), bx.type_bool()], false), ptr_align);
-        let catch_data_0 = catch.inbounds_gep(catch_data, &[bx.const_usize(0), bx.const_usize(0)]);
+            catch.alloca(cx.type_struct(&[cx.type_i8p(), cx.type_bool()], false), ptr_align);
+        let catch_data_0 = catch.inbounds_gep(catch_data, &[cx.const_usize(0), cx.const_usize(0)]);
         catch.store(ptr, catch_data_0, ptr_align);
-        let catch_data_1 = catch.inbounds_gep(catch_data, &[bx.const_usize(0), bx.const_usize(1)]);
+        let catch_data_1 = catch.inbounds_gep(catch_data, &[cx.const_usize(0), cx.const_usize(1)]);
         catch.store(is_rust_panic, catch_data_1, i8_align);
-        let catch_data = catch.bitcast(catch_data, bx.type_i8p());
+        let catch_data = catch.bitcast(catch_data, cx.type_i8p());
 
         catch.call(catch_func, &[data, catch_data], None, None);
-        catch.ret(bx.const_i32(1));
+        catch.ret(cx.const_i32(1));
     });
 
     // Note that no invoke is used here because by definition this function
diff --git a/compiler/rustc_codegen_llvm/src/va_arg.rs b/compiler/rustc_codegen_llvm/src/va_arg.rs
index ef2aa921e01c6..efcfabad0c267 100644
--- a/compiler/rustc_codegen_llvm/src/va_arg.rs
+++ b/compiler/rustc_codegen_llvm/src/va_arg.rs
@@ -90,10 +90,12 @@ fn emit_ptr_va_arg(
 }
 
 fn emit_aapcs_va_arg(
-    bx: &mut Builder<'a, 'll, 'tcx>,
+    mut bx: Builder<'a, 'll, 'tcx>,
     list: OperandRef<'tcx, &'ll Value>,
     target_ty: Ty<'tcx>,
-) -> &'ll Value {
+) -> (Builder<'a, 'll, 'tcx>, &'ll Value) {
+    let cx = bx.cx;
+
     // Implementation of the AAPCS64 calling convention for va_args see
     // https://github.com/ARM-software/abi-aa/blob/master/aapcs64/aapcs64.rst
     let va_list_addr = list.immediate();
@@ -101,7 +103,9 @@ fn emit_aapcs_va_arg(
 
     let mut maybe_reg = bx.build_sibling_block("va_arg.maybe_reg");
     let mut in_reg = bx.build_sibling_block("va_arg.in_reg");
+    let in_reg_llbb = in_reg.llbb();
     let mut on_stack = bx.build_sibling_block("va_arg.on_stack");
+    let on_stack_llbb = on_stack.llbb();
     let mut end = bx.build_sibling_block("va_arg.end");
     let zero = bx.const_i32(0);
     let offset_align = Align::from_bytes(4).unwrap();
@@ -127,10 +131,10 @@ fn emit_aapcs_va_arg(
     // the offset again.
     if gr_type && layout.align.abi.bytes() > 8 {
-        reg_off_v = maybe_reg.add(reg_off_v, bx.const_i32(15));
-        reg_off_v = maybe_reg.and(reg_off_v, bx.const_i32(-16));
+        reg_off_v = maybe_reg.add(reg_off_v, cx.const_i32(15));
+        reg_off_v = maybe_reg.and(reg_off_v, cx.const_i32(-16));
     }
-    let new_reg_off_v = maybe_reg.add(reg_off_v, bx.const_i32(slot_size as i32));
+    let new_reg_off_v = maybe_reg.add(reg_off_v, cx.const_i32(slot_size as i32));
 
     maybe_reg.store(new_reg_off_v, reg_off, offset_align);
 
@@ -140,16 +144,16 @@ fn emit_aapcs_va_arg(
     maybe_reg.cond_br(use_stack, &on_stack.llbb(), &in_reg.llbb());
 
     let top = in_reg.struct_gep(va_list_addr, reg_top_index);
-    let top = in_reg.load(top, bx.tcx().data_layout.pointer_align.abi);
+    let top = in_reg.load(top, cx.tcx().data_layout.pointer_align.abi);
 
     // reg_value = *(@top + reg_off_v);
     let mut reg_addr = in_reg.gep(top, &[reg_off_v]);
-    if bx.tcx().sess.target.endian == Endian::Big && layout.size.bytes() != slot_size {
+    if cx.tcx().sess.target.endian == Endian::Big && layout.size.bytes() != slot_size {
         // On big-endian systems the value is right-aligned in its slot.
-        let offset = bx.const_i32((slot_size - layout.size.bytes()) as i32);
+        let offset = cx.const_i32((slot_size - layout.size.bytes()) as i32);
         reg_addr = in_reg.gep(reg_addr, &[offset]);
     }
-    let reg_addr = in_reg.bitcast(reg_addr, bx.cx.type_ptr_to(layout.llvm_type(bx)));
+    let reg_addr = in_reg.bitcast(reg_addr, cx.type_ptr_to(layout.llvm_type(cx)));
     let reg_value = in_reg.load(reg_addr, layout.align.abi);
     in_reg.br(&end.llbb());
 
@@ -159,13 +163,12 @@ fn emit_aapcs_va_arg(
     on_stack.br(&end.llbb());
 
     let val = end.phi(
-        layout.immediate_llvm_type(bx),
+        layout.immediate_llvm_type(cx),
         &[reg_value, stack_value],
-        &[&in_reg.llbb(), &on_stack.llbb()],
+        &[&in_reg_llbb, &on_stack_llbb],
     );
 
-    *bx = end;
-    val
+    (end, val)
 }
 
 pub(super) fn emit_va_arg(
@@ -194,7 +197,11 @@ pub(super) fn emit_va_arg(
         "aarch64" if target.is_like_osx => {
            emit_ptr_va_arg(&mut bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), true)
         }
-        "aarch64" => emit_aapcs_va_arg(&mut bx, addr, target_ty),
+        "aarch64" => {
+            let (new_bx, val) = emit_aapcs_va_arg(bx, addr, target_ty);
+            bx = new_bx;
+            val
+        }
         // Windows x86_64
         "x86_64" if target.is_like_windows => {
             let target_ty_size = bx.cx.size_of(target_ty).bytes();
diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs
index 6b387a282dda2..16da2219f0234 100644
--- a/compiler/rustc_codegen_ssa/src/mir/block.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/block.rs
@@ -74,9 +74,10 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
             debug!("llblock: creating cleanup trampoline for {:?}", target);
             let name = &format!("{:?}_cleanup_trampoline_{:?}", self.bb, target);
-            let mut trampoline = fx.new_block(name);
+            let trampoline = fx.new_block(name);
+            let trampoline_llbb = trampoline.llbb();
             trampoline.cleanup_ret(self.funclet(fx).unwrap(), Some(lltarget));
-            trampoline.llbb()
+            trampoline_llbb
         } else {
             lltarget
         }
     }
 
@@ -85,16 +86,16 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
     fn funclet_br<Bx: BuilderMethods<'a, 'tcx>>(
         &self,
         fx: &mut FunctionCx<'a, 'tcx, Bx>,
-        bx: &mut Bx,
+        bx: Bx,
         target: mir::BasicBlock,
-    ) {
+    ) -> Bx::Unpositioned {
         let (lltarget, is_cleanupret) = self.lltarget(fx, target);
         if is_cleanupret {
             // micro-optimization: generate a `ret` rather than a jump
             // to a trampoline.
-            bx.cleanup_ret(self.funclet(fx).unwrap(), Some(lltarget));
+            bx.cleanup_ret(self.funclet(fx).unwrap(), Some(lltarget)).0
         } else {
-            bx.br(lltarget);
+            bx.br(lltarget)
         }
     }
 
@@ -103,25 +104,25 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
     fn do_call<Bx: BuilderMethods<'a, 'tcx>>(
         &self,
         fx: &mut FunctionCx<'a, 'tcx, Bx>,
-        bx: &mut Bx,
+        mut bx: Bx,
         fn_abi: FnAbi<'tcx, Ty<'tcx>>,
         fn_ptr: Bx::Value,
         llargs: &[Bx::Value],
         destination: Option<(ReturnDest<'tcx, Bx::Value>, mir::BasicBlock)>,
         cleanup: Option<mir::BasicBlock>,
-    ) {
+    ) -> Bx::Unpositioned {
         // If there is a cleanup block and the function we're calling can unwind, then
         // do an invoke, otherwise do a call.
         if let Some(cleanup) = cleanup.filter(|_| fn_abi.can_unwind) {
-            let ret_bx = if let Some((_, target)) = destination {
+            let ret_llbb = if let Some((_, target)) = destination {
                 fx.blocks[target]
             } else {
                 fx.unreachable_block()
             };
-            let invokeret = bx.invoke(
+            let (unpositioned_bx, invokeret) = bx.invoke(
                 fn_ptr,
                 &llargs,
-                ret_bx,
+                ret_llbb,
                 self.llblock(fx, cleanup),
                 self.funclet(fx),
                 Some(&fn_abi),
@@ -132,6 +133,8 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
                 fx.set_debug_loc(&mut ret_bx, self.terminator.source_info);
                 fx.store_return(&mut ret_bx, ret_dest, &fn_abi.ret, invokeret);
             }
+
+            unpositioned_bx
         } else {
             let llret = bx.call(fn_ptr, &llargs, self.funclet(fx), Some(&fn_abi));
             if fx.mir[self.bb].is_cleanup {
@@ -143,10 +146,10 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
             }
 
             if let Some((ret_dest, target)) = destination {
-                fx.store_return(bx, ret_dest, &fn_abi.ret, llret);
-                self.funclet_br(fx, bx, target);
+                fx.store_return(&mut bx, ret_dest, &fn_abi.ret, llret);
+                self.funclet_br(fx, bx, target)
             } else {
-                bx.unreachable();
+                bx.unreachable()
             }
         }
     }
 
@@ -155,9 +158,13 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
 /// Codegen implementations for some terminator variants.
 impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     /// Generates code for a `Resume` terminator.
-    fn codegen_resume_terminator(&mut self, helper: TerminatorCodegenHelper<'tcx>, mut bx: Bx) {
+    fn codegen_resume_terminator(
+        &mut self,
+        helper: TerminatorCodegenHelper<'tcx>,
+        mut bx: Bx,
+    ) -> Bx::Unpositioned {
         if let Some(funclet) = helper.funclet(self) {
-            bx.cleanup_ret(funclet, None);
+            bx.cleanup_ret(funclet, None).0
         } else {
             let slot = self.get_personality_slot(&mut bx);
             let lp0 = slot.project_field(&mut bx, 0);
@@ -169,7 +176,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             let mut lp = bx.const_undef(self.landing_pad_type());
             lp = bx.insert_value(lp, lp0, 0);
             lp = bx.insert_value(lp, lp1, 1);
-            bx.resume(lp);
+            bx.resume(lp).0
         }
     }
 
@@ -180,7 +187,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         discr: &mir::Operand<'tcx>,
         switch_ty: Ty<'tcx>,
         targets: &SwitchTargets,
-    ) {
+    ) -> Bx::Unpositioned {
         let discr = self.codegen_operand(&mut bx, &discr);
         // `switch_ty` is redundant, sanity-check that.
         assert_eq!(discr.layout.ty, switch_ty);
@@ -201,18 +208,18 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 let switch_llty = bx.immediate_backend_type(bx.layout_of(switch_ty));
                 let llval = bx.const_uint_big(switch_llty, test_value);
                 let cmp = bx.icmp(IntPredicate::IntEQ, discr.immediate(), llval);
-                bx.cond_br(cmp, lltrue, llfalse);
+                bx.cond_br(cmp, lltrue, llfalse)
             }
         } else {
             bx.switch(
                 discr.immediate(),
                 helper.llblock(self, targets.otherwise()),
                 target_iter.map(|(value, target)| (value, helper.llblock(self, target))),
-            );
+            )
         }
     }
 
-    fn codegen_return_terminator(&mut self, mut bx: Bx) {
+    fn codegen_return_terminator(&mut self, mut bx: Bx) -> Bx::Unpositioned {
         // Call `va_end` if this is the definition of a C-variadic function.
         if self.fn_abi.c_variadic {
             // The `VaList` "spoofed" argument is just after all the real arguments.
@@ -232,13 +239,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             bx.abort();
             // `abort` does not terminate the block, so we still need to generate
             // an `unreachable` terminator after it.
-            bx.unreachable();
-            return;
+            return bx.unreachable();
         }
         let llval = match self.fn_abi.ret.mode {
             PassMode::Ignore | PassMode::Indirect { .. } => {
-                bx.ret_void();
-                return;
+                return bx.ret_void();
             }
 
             PassMode::Direct(_) | PassMode::Pair(..) => {
@@ -275,7 +280,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 bx.load(addr, self.fn_abi.ret.layout.align.abi)
             }
         };
-        bx.ret(llval);
+        bx.ret(llval)
     }
 
     fn codegen_drop_terminator(
@@ -285,15 +290,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         location: mir::Place<'tcx>,
         target: mir::BasicBlock,
         unwind: Option<mir::BasicBlock>,
-    ) {
+    ) -> Bx::Unpositioned {
         let ty = location.ty(self.mir, bx.tcx()).ty;
         let ty = self.monomorphize(ty);
         let drop_fn = Instance::resolve_drop_in_place(bx.tcx(), ty);
 
         if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def {
             // we don't actually need to drop anything.
-            helper.funclet_br(self, &mut bx, target);
-            return;
+            return helper.funclet_br(self, bx, target);
         }
 
         let place = self.codegen_place(&mut bx, location.as_ref());
@@ -320,15 +324,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             }
             _ => (bx.get_fn_addr(drop_fn), FnAbi::of_instance(&bx, drop_fn, &[])),
         };
-        helper.do_call(
-            self,
-            &mut bx,
-            fn_abi,
-            drop_fn,
-            args,
-            Some((ReturnDest::Nothing, target)),
-            unwind,
-        );
+        helper.do_call(self, bx, fn_abi, drop_fn, args, Some((ReturnDest::Nothing, target)), unwind)
     }
 
     fn codegen_assert_terminator(
@@ -341,7 +337,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         msg: &mir::AssertMessage<'tcx>,
         target: mir::BasicBlock,
         cleanup: Option<mir::BasicBlock>,
-    ) {
+    ) -> Bx::Unpositioned {
         let span = terminator.source_info.span;
         let cond = self.codegen_operand(&mut bx, cond).immediate();
         let mut const_cond = bx.const_to_opt_u128(cond, false).map(|c| c == 1);
@@ -361,8 +357,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
         // Don't codegen the panic block if success if known.
         if const_cond == Some(expected) {
-            helper.funclet_br(self, &mut bx, target);
-            return;
+            return helper.funclet_br(self, bx, target);
         }
 
         // Pass the condition through llvm.expect for branch hinting.
@@ -409,20 +404,21 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         let llfn = bx.get_fn_addr(instance);
 
         // Codegen the actual panic invoke/call.
-        helper.do_call(self, &mut bx, fn_abi, llfn, &args, None, cleanup);
+        helper.do_call(self, bx, fn_abi, llfn, &args, None, cleanup)
     }
 
-    /// Returns `true` if this is indeed a panic intrinsic and codegen is done.
+    /// Returns `Ok` if this is indeed a panic intrinsic and codegen is done,
+    /// otherwise returns `Err(bx)`, having not touched `bx`.
     fn codegen_panic_intrinsic(
         &mut self,
         helper: &TerminatorCodegenHelper<'tcx>,
-        bx: &mut Bx,
+        mut bx: Bx,
         intrinsic: Option<Symbol>,
         instance: Option<Instance<'tcx>>,
         source_info: mir::SourceInfo,
         destination: &Option<(mir::Place<'tcx>, mir::BasicBlock)>,
         cleanup: Option<mir::BasicBlock>,
-    ) -> bool {
+    ) -> Result<Bx::Unpositioned, Bx> {
         // Emit a panic or a no-op for `assert_*` intrinsics.
         // These are intrinsics that compile to panics so that we can get a message
         // which mentions the offending type, even from a const context.
@@ -445,11 +441,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             let do_panic = match intrinsic {
                 Inhabited => layout.abi.is_uninhabited(),
                 // We unwrap as the error type is `!`.
-                ZeroValid => !layout.might_permit_raw_init(bx, /*zero:*/ true).unwrap(),
+                ZeroValid => !layout.might_permit_raw_init(&bx, /*zero:*/ true).unwrap(),
                 // We unwrap as the error type is `!`.
-                UninitValid => !layout.might_permit_raw_init(bx, /*zero:*/ false).unwrap(),
+                UninitValid => !layout.might_permit_raw_init(&bx, /*zero:*/ false).unwrap(),
             };
-            if do_panic {
+            Ok(if do_panic {
                 let msg_str = with_no_trimmed_paths(|| {
                     if layout.abi.is_uninhabited() {
                         // Use this error even for the other intrinsics as it is more precise.
@@ -461,14 +457,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     }
                 });
                 let msg = bx.const_str(Symbol::intern(&msg_str));
-                let location = self.get_caller_location(bx, source_info).immediate();
+                let location = self.get_caller_location(&mut bx, source_info).immediate();
 
                 // Obtain the panic entry point.
                 // FIXME: dedup this with `codegen_assert_terminator` above.
                 let def_id = common::langcall(bx.tcx(), Some(source_info.span), "", LangItem::Panic);
                 let instance = ty::Instance::mono(bx.tcx(), def_id);
-                let fn_abi = FnAbi::of_instance(bx, instance, &[]);
+                let fn_abi = FnAbi::of_instance(&bx, instance, &[]);
                 let llfn = bx.get_fn_addr(instance);
 
                 // Codegen the actual panic invoke/call.
@@ -480,15 +476,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                    &[msg.0, msg.1, location],
                     destination.as_ref().map(|(_, bb)| (ReturnDest::Nothing, *bb)),
                     cleanup,
-                );
+                )
             } else {
                 // a NOP
                 let target = destination.as_ref().unwrap().1;
                 helper.funclet_br(self, bx, target)
-            }
-            true
+            })
         } else {
-            false
+            Err(bx)
         }
     }
 
@@ -502,7 +497,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         destination: &Option<(mir::Place<'tcx>, mir::BasicBlock)>,
         cleanup: Option<mir::BasicBlock>,
         fn_span: Span,
-    ) {
+    ) -> Bx::Unpositioned {
         let source_info = terminator.source_info;
         let span = source_info.span;
 
@@ -527,8 +522,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         if let Some(ty::InstanceDef::DropGlue(_, None)) = def {
             // Empty drop glue; a no-op.
             let &(_, target) = destination.as_ref().unwrap();
-            helper.funclet_br(self, &mut bx, target);
-            return;
+            return helper.funclet_br(self, bx, target);
         }
 
         // FIXME(eddyb) avoid computing this if possible, when `instance` is
@@ -558,10 +552,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         };
 
         if intrinsic == Some(sym::transmute) {
-            if let Some(destination_ref) = destination.as_ref() {
+            return if let Some(destination_ref) = destination.as_ref() {
                 let &(dest, target) = destination_ref;
                 self.codegen_transmute(&mut bx, &args[0], dest);
-                helper.funclet_br(self, &mut bx, target);
+                helper.funclet_br(self, bx, target)
             } else {
                 // If we are trying to transmute to an uninhabited type,
                 // it is likely there is no allotted destination. In fact,
@@ -570,22 +564,22 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 // into an uninhabited type is impossible, so anything following
                 // it must be unreachable.
                 assert_eq!(fn_abi.ret.layout.abi, abi::Abi::Uninhabited);
-                bx.unreachable();
-            }
-            return;
+                bx.unreachable()
+            };
         }
 
-        if self.codegen_panic_intrinsic(
+        bx = match self.codegen_panic_intrinsic(
             &helper,
-            &mut bx,
+            bx,
             intrinsic,
             instance,
             source_info,
             destination,
             cleanup,
         ) {
-            return;
-        }
+            Ok(unpositioned_bx) => return unpositioned_bx,
+            Err(bx) => bx,
+        };
 
         // The arguments we'll be passing. Plus one to account for outptr, if used.
         let arg_count = fn_abi.args.len() + fn_abi.ret.is_indirect() as usize;
@@ -600,17 +594,15 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         };
 
         if intrinsic == Some(sym::caller_location) {
-            if let Some((_, target)) = destination.as_ref() {
-                let location = self
-                    .get_caller_location(&mut bx, mir::SourceInfo { span: fn_span, ..source_info });
+            let &(_, target) = destination.as_ref().unwrap();
+            let location =
+                self.get_caller_location(&mut bx, mir::SourceInfo { span: fn_span, ..source_info });
 
-                if let ReturnDest::IndirectOperand(tmp, _) = ret_dest {
-                    location.val.store(&mut bx, tmp);
-                }
-                self.store_return(&mut bx, ret_dest, &fn_abi.ret, location.immediate());
-                helper.funclet_br(self, &mut bx, *target);
+            if let ReturnDest::IndirectOperand(tmp, _) = ret_dest {
+                location.val.store(&mut bx, tmp);
             }
-            return;
+            self.store_return(&mut bx, ret_dest, &fn_abi.ret, location.immediate());
+            return helper.funclet_br(self, bx, target);
         }
 
         match intrinsic {
@@ -667,13 +659,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     self.store_return(&mut bx, ret_dest, &fn_abi.ret, dst.llval);
                 }
 
-                if let Some((_, target)) = *destination {
-                    helper.funclet_br(self, &mut bx, target);
+                return if let Some((_, target)) = *destination {
+                    helper.funclet_br(self, bx, target)
                 } else {
-                    bx.unreachable();
-                }
-
-                return;
+                    bx.unreachable()
+                };
             }
         }
 
@@ -786,13 +776,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
         helper.do_call(
             self,
-            &mut bx,
+            bx,
             fn_abi,
             fn_ptr,
             &llargs,
             destination.as_ref().map(|&(_, target)| (ret_dest, target)),
             cleanup,
-        );
+        )
     }
 
     fn codegen_asm_terminator(
@@ -805,7 +795,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         options: ast::InlineAsmOptions,
         line_spans: &[Span],
         destination: Option<mir::BasicBlock>,
-    ) {
+    ) -> Bx::Unpositioned {
         let span = terminator.source_info.span;
 
         let operands: Vec<_> = operands
@@ -882,9 +872,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         bx.codegen_inline_asm(template, &operands, options, line_spans);
 
         if let Some(target) = destination {
-            helper.funclet_br(self, &mut bx, target);
+            helper.funclet_br(self, bx, target)
         } else {
-            bx.unreachable();
+            bx.unreachable()
         }
     }
 }
@@ -909,7 +899,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         mut bx: Bx,
         bb: mir::BasicBlock,
         terminator: &'tcx mir::Terminator<'tcx>,
-    ) {
+    ) -> Bx::Unpositioned {
         debug!("codegen_terminator: {:?}", terminator);
 
         // Create the cleanup bundle, if needed.
@@ -924,7 +914,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 bx.abort();
                 // `abort` does not terminate the block, so we still need to generate
                 // an `unreachable` terminator after it.
-                bx.unreachable();
+                bx.unreachable()
            }
 
             mir::TerminatorKind::Goto { target } => {
@@ -939,30 +929,25 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     bx.sideeffect();
                 }
 
-                helper.funclet_br(self, &mut bx, target);
+                helper.funclet_br(self, bx, target)
             }
 
             mir::TerminatorKind::SwitchInt { ref discr, switch_ty, ref targets } => {
-                self.codegen_switchint_terminator(helper, bx, discr, switch_ty, targets);
+                self.codegen_switchint_terminator(helper, bx, discr, switch_ty, targets)
             }
 
-            mir::TerminatorKind::Return => {
-                self.codegen_return_terminator(bx);
-            }
+            mir::TerminatorKind::Return => self.codegen_return_terminator(bx),
 
-            mir::TerminatorKind::Unreachable => {
-                bx.unreachable();
-            }
+            mir::TerminatorKind::Unreachable => bx.unreachable(),
 
             mir::TerminatorKind::Drop { place, target, unwind } => {
-                self.codegen_drop_terminator(helper, bx, place, target, unwind);
+                self.codegen_drop_terminator(helper, bx, place, target, unwind)
             }
 
-            mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, cleanup } => {
-                self.codegen_assert_terminator(
+            mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, cleanup } => self
+                .codegen_assert_terminator(
                     helper, bx, terminator, cond, expected, msg, target, cleanup,
-                );
-            }
+                ),
 
             mir::TerminatorKind::DropAndReplace { .. } => {
                 bug!("undesugared DropAndReplace in codegen: {:?}", terminator);
@@ -975,18 +960,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 cleanup,
                 from_hir_call: _,
                 fn_span,
-            } => {
-                self.codegen_call_terminator(
-                    helper,
-                    bx,
-                    terminator,
-                    func,
-                    args,
-                    destination,
-                    cleanup,
-                    fn_span,
-                );
-            }
+            } => self.codegen_call_terminator(
+                helper,
+                bx,
+                terminator,
+                func,
+                args,
+                destination,
+                cleanup,
+                fn_span,
+            ),
 
             mir::TerminatorKind::GeneratorDrop | mir::TerminatorKind::Yield { .. } => {
                 bug!("generator ops in codegen")
             }
@@ -1000,18 +983,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 options,
                 line_spans,
                 destination,
-            } => {
-                self.codegen_asm_terminator(
-                    helper,
-                    bx,
-                    terminator,
-                    template,
-                    operands,
-                    options,
-                    line_spans,
-                    destination,
-                );
-            }
+            } => self.codegen_asm_terminator(
+                helper,
+                bx,
+                terminator,
+                template,
+                operands,
+                options,
+                line_spans,
+                destination,
+            ),
         }
     }
 
@@ -1216,6 +1197,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         }
 
         let mut bx = self.new_block("cleanup");
+        let landing_pad_llbb = bx.llbb();
 
         let llpersonality = self.cx.eh_personality();
         let llretty = self.landing_pad_type();
@@ -1227,7 +1209,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         Pair(bx.extract_value(lp, 0), bx.extract_value(lp, 1)).store(&mut bx, slot);
 
         bx.br(target_bb);
-        bx.llbb()
+
+        landing_pad_llbb
     }
 
     fn landing_pad_type(&self) -> Bx::Type {
@@ -1237,10 +1220,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
     fn unreachable_block(&mut self) -> Bx::BasicBlock {
         self.unreachable_block.unwrap_or_else(|| {
-            let mut bx = self.new_block("unreachable");
+            let bx = self.new_block("unreachable");
+            let llbb = bx.llbb();
             bx.unreachable();
-            self.unreachable_block = Some(bx.llbb());
-            bx.llbb()
+            self.unreachable_block = Some(llbb);
+            llbb
         })
     }
diff --git a/compiler/rustc_codegen_ssa/src/mir/mod.rs b/compiler/rustc_codegen_ssa/src/mir/mod.rs
index 1213716098bce..3431e6695a101 100644
--- a/compiler/rustc_codegen_ssa/src/mir/mod.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/mod.rs
@@ -262,6 +262,9 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
 
     // Remove blocks that haven't been visited, or have no
     // predecessors.
+    // FIXME(eddyb) shouldn't need to create a positioned `Bx` just to
+    // call `delete_basic_block` on it.
+    let mut bx = fx.build_block(mir::START_BLOCK);
     for bb in mir.basic_blocks().indices() {
         // Unreachable block
         if !visited.contains(bb.index()) {
@@ -313,11 +316,11 @@ fn create_funclets<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
                 //     bar();
                 // }
                 Some(&mir::TerminatorKind::Abort) => {
-                    let mut cs_bx = bx.build_sibling_block(&format!("cs_funclet{:?}", bb));
+                    let cs_bx = bx.build_sibling_block(&format!("cs_funclet{:?}", bb));
                     let mut cp_bx = bx.build_sibling_block(&format!("cp_funclet{:?}", bb));
                     ret_llbb = cs_bx.llbb();
 
-                    let cs = cs_bx.catch_switch(None, None, &[cp_bx.llbb()]);
+                    let (_, cs) = cs_bx.catch_switch(None, None, &[cp_bx.llbb()]);
 
                     // The "null" here is actually a RTTI type descriptor for the
                     // C++ personality function, but `catch (...)` has no type so
diff --git a/compiler/rustc_codegen_ssa/src/traits/builder.rs b/compiler/rustc_codegen_ssa/src/traits/builder.rs
index 9418830f7dbe3..99f353dbe6d60 100644
--- a/compiler/rustc_codegen_ssa/src/traits/builder.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/builder.rs
@@ -57,31 +57,55 @@ pub trait BuilderMethods<'a, 'tcx>:
     fn llbb(&self) -> Self::BasicBlock;
     fn set_span(&mut self, span: Span);
 
-    fn ret_void(&mut self);
-    fn ret(&mut self, v: Self::Value);
-    fn br(&mut self, dest: Self::BasicBlock);
+    // Terminator instructions (the final instruction in a block).
+    // These methods take the IR builder by value and return an unpositioned one
+    // (in order to make it impossible to accidentally add more instructions).
+
+    fn ret_void(self) -> Self::Unpositioned;
+    fn ret(self, v: Self::Value) -> Self::Unpositioned;
+    fn br(self, dest: Self::BasicBlock) -> Self::Unpositioned;
     fn cond_br(
-        &mut self,
+        self,
         cond: Self::Value,
         then_llbb: Self::BasicBlock,
         else_llbb: Self::BasicBlock,
-    );
+    ) -> Self::Unpositioned;
     fn switch(
-        &mut self,
+        self,
         v: Self::Value,
         else_llbb: Self::BasicBlock,
         cases: impl ExactSizeIterator<Item = (u128, Self::BasicBlock)>,
-    );
+    ) -> Self::Unpositioned;
+    fn unreachable(self) -> Self::Unpositioned;
+
+    // EH (exception handling) terminator instructions.
+    // Just like regular terminators, these methods transform the IR builder type,
+    // but they can also return values (for various reasons).
+    // FIXME(eddyb) a lot of these are LLVM-specific, redesign them.
+
     fn invoke(
-        &mut self,
+        self,
         llfn: Self::Value,
         args: &[Self::Value],
         then: Self::BasicBlock,
         catch: Self::BasicBlock,
         funclet: Option<&Self::Funclet>,
         fn_abi_for_attrs: Option<&FnAbi<'tcx, Ty<'tcx>>>,
-    ) -> Self::Value;
-    fn unreachable(&mut self);
+    ) -> (Self::Unpositioned, Self::Value);
+    fn resume(self, exn: Self::Value) -> (Self::Unpositioned, Self::Value);
+    fn cleanup_ret(
+        self,
+        funclet: &Self::Funclet,
+        unwind: Option<Self::BasicBlock>,
+    ) -> (Self::Unpositioned, Self::Value);
+    fn catch_switch(
+        self,
+        parent: Option<Self::Value>,
+        unwind: Option<Self::BasicBlock>,
+        handlers: &[Self::BasicBlock],
+    ) -> (Self::Unpositioned, Self::Value);
+
+    // Regular (intra-block) instructions.
 
     fn add(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
     fn fadd(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
@@ -243,20 +267,8 @@ pub trait BuilderMethods<'a, 'tcx>:
         num_clauses: usize,
     ) -> Self::Value;
     fn set_cleanup(&mut self, landing_pad: Self::Value);
-    fn resume(&mut self, exn: Self::Value) -> Self::Value;
     fn cleanup_pad(&mut self, parent: Option<Self::Value>, args: &[Self::Value]) -> Self::Funclet;
-    fn cleanup_ret(
-        &mut self,
-        funclet: &Self::Funclet,
-        unwind: Option<Self::BasicBlock>,
-    ) -> Self::Value;
     fn catch_pad(&mut self, parent: Self::Value, args: &[Self::Value]) -> Self::Funclet;
-    fn catch_switch(
-        &mut self,
-        parent: Option<Self::Value>,
-        unwind: Option<Self::BasicBlock>,
-        handlers: &[Self::BasicBlock],
-    ) -> Self::Value;
     fn set_personality_fn(&mut self, personality: Self::Value);
 
     fn atomic_cmpxchg(
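
Aside: the new `BuilderMethods` comment above is the heart of the change — terminator methods consume the positioned builder and hand back a `Self::Unpositioned`, so appending an instruction after a terminator becomes a move-checker error instead of a runtime LLVM assertion. A minimal self-contained sketch of that typestate idea, using illustrative stand-in types (`Builder`, `Unpositioned`) rather than rustc's real ones:

// Sketch only: `Builder`/`Unpositioned` are stand-ins, not rustc's real types.

/// Builder positioned at the end of a basic block that has no terminator yet.
struct Builder {
    insts: Vec<String>,
}

/// Builder with no insertion point; it must be repositioned on a fresh block
/// before any further instructions can be emitted.
struct Unpositioned;

impl Builder {
    /// Ordinary instructions borrow the builder and keep it positioned.
    fn add(&mut self, lhs: i64, rhs: i64) -> i64 {
        self.insts.push(format!("add {}, {}", lhs, rhs));
        lhs + rhs
    }

    /// Terminators take `self` by value and return the unpositioned state,
    /// mirroring `fn ret(self, v: Self::Value) -> Self::Unpositioned` above.
    fn ret(mut self, v: i64) -> Unpositioned {
        self.insts.push(format!("ret {}", v));
        Unpositioned
    }
}

fn main() {
    let mut bx = Builder { insts: Vec::new() };
    let sum = bx.add(2, 2);
    let _done: Unpositioned = bx.ret(sum);
    // bx.add(1, 1); // error[E0382]: borrow of moved value: `bx`
}

This is the same reason `funclet_br`, `do_call`, and the `codegen_*_terminator` methods in the patch now thread `Bx` through by value and return `Bx::Unpositioned` instead of taking `&mut Bx`.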