diff --git a/compiler/rustc_builtin_macros/src/asm.rs b/compiler/rustc_builtin_macros/src/asm.rs
index ae2627d693867..e313016e3d863 100644
--- a/compiler/rustc_builtin_macros/src/asm.rs
+++ b/compiler/rustc_builtin_macros/src/asm.rs
@@ -812,6 +812,44 @@ pub(super) fn expand_asm<'cx>(
     })
 }
 
+pub(super) fn expand_naked_asm<'cx>(
+    ecx: &'cx mut ExtCtxt<'_>,
+    sp: Span,
+    tts: TokenStream,
+) -> MacroExpanderResult<'cx> {
+    ExpandResult::Ready(match parse_args(ecx, sp, tts, false) {
+        Ok(args) => {
+            let ExpandResult::Ready(mac) = expand_preparsed_asm(ecx, args) else {
+                return ExpandResult::Retry(());
+            };
+            let expr = match mac {
+                Ok(mut inline_asm) => {
+                    // For future compatibility, we always set the NORETURN option.
+                    //
+                    // When code is migrated from `asm!` to `naked_asm!` under this implementation,
+                    // the `options(noreturn)` can simply be dropped, which keeps the upgrade smooth
+                    // once `naked_asm!` starts disallowing the `noreturn` option in the future.
+                    inline_asm.options |= ast::InlineAsmOptions::NORETURN;
+
+                    P(ast::Expr {
+                        id: ast::DUMMY_NODE_ID,
+                        kind: ast::ExprKind::InlineAsm(P(inline_asm)),
+                        span: sp,
+                        attrs: ast::AttrVec::new(),
+                        tokens: None,
+                    })
+                }
+                Err(guar) => DummyResult::raw_expr(sp, Some(guar)),
+            };
+            MacEager::expr(expr)
+        }
+        Err(err) => {
+            let guar = err.emit();
+            DummyResult::any(sp, guar)
+        }
+    })
+}
+
 pub(super) fn expand_global_asm<'cx>(
     ecx: &'cx mut ExtCtxt<'_>,
     sp: Span,
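
Editor's sketch, not part of this patch: because `expand_naked_asm` always ORs in `NORETURN`, users of the new builtin are not expected to spell out `options(noreturn)` the way `asm!`-based naked functions did. A hedged usage sketch, assuming x86-64 and the unstable `naked_functions` feature; the exact import path and unsafety rules of the unstable macro may differ:

#![feature(naked_functions)]
use core::arch::naked_asm; // assumed re-export location of the builtin

#[naked]
extern "C" fn add(a: u64, b: u64) -> u64 {
    // No `options(noreturn)` here: expand_naked_asm sets NORETURN for us.
    unsafe { naked_asm!("lea rax, [rdi + rsi]", "ret") }
}
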
diff --git a/compiler/rustc_builtin_macros/src/lib.rs b/compiler/rustc_builtin_macros/src/lib.rs
index 30e1c8d262216..ebe5e2b544292 100644
--- a/compiler/rustc_builtin_macros/src/lib.rs
+++ b/compiler/rustc_builtin_macros/src/lib.rs
@@ -94,6 +94,7 @@ pub fn register_builtin_macros(resolver: &mut dyn ResolverExpand) {
         line: source_util::expand_line,
         log_syntax: log_syntax::expand_log_syntax,
         module_path: source_util::expand_mod,
+        naked_asm: asm::expand_naked_asm,
         option_env: env::expand_option_env,
         pattern_type: pattern_type::expand,
         std_panic: edition_panic::expand_panic,
diff --git a/compiler/rustc_builtin_macros/src/test_harness.rs b/compiler/rustc_builtin_macros/src/test_harness.rs
index a9e4434581163..a694d3b8c2857 100644
--- a/compiler/rustc_builtin_macros/src/test_harness.rs
+++ b/compiler/rustc_builtin_macros/src/test_harness.rs
@@ -326,6 +326,8 @@ fn mk_main(cx: &mut TestCtxt<'_>) -> P<ast::Item> {
     let main_attr = ecx.attr_word(sym::rustc_main, sp);
     // #[coverage(off)]
     let coverage_attr = ecx.attr_nested_word(sym::coverage, sym::off, sp);
+    // #[allow(missing_docs)]
+    let missing_docs_attr = ecx.attr_nested_word(sym::allow, sym::missing_docs, sp);
 
     // pub fn main() { ... }
     let main_ret_ty = ecx.ty(sp, ast::TyKind::Tup(ThinVec::new()));
@@ -355,7 +357,7 @@ fn mk_main(cx: &mut TestCtxt<'_>) -> P<ast::Item> {
 
     let main = P(ast::Item {
         ident: main_id,
-        attrs: thin_vec![main_attr, coverage_attr],
+        attrs: thin_vec![main_attr, coverage_attr, missing_docs_attr],
         id: ast::DUMMY_NODE_ID,
         kind: main,
         vis: ast::Visibility { span: sp, kind: ast::VisibilityKind::Public, tokens: None },
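
Editor's note on this hunk: the test harness already synthesized a `#[rustc_main]` entry point with `#[coverage(off)]`; the change only adds one more attribute so `missing_docs` lints never fire on generated code. Roughly, the generated item now looks like this (sketch of the expansion, body elided as in the comments above):

#[rustc_main]
#[coverage(off)]
#[allow(missing_docs)] // newly added by this change
pub fn main() {
    // ... calls the test runner with the collected #[test] functions ...
}
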
diff --git a/compiler/rustc_codegen_gcc/src/back/lto.rs b/compiler/rustc_codegen_gcc/src/back/lto.rs
index 6b2dbbbed6771..c2adab7137f62 100644
--- a/compiler/rustc_codegen_gcc/src/back/lto.rs
+++ b/compiler/rustc_codegen_gcc/src/back/lto.rs
@@ -272,7 +272,6 @@ fn fat_lto(
             }*/
         }
     };
-    let mut serialized_bitcode = Vec::new();
     {
         info!("using {:?} as a base module", module.name);
 
@@ -317,7 +316,6 @@ fn fat_lto(
                     unimplemented!("from uncompressed file")
                 }
             }
-            serialized_bitcode.push(bc_decoded);
         }
         save_temp_bitcode(cgcx, &module, "lto.input");
 
@@ -337,7 +335,7 @@ fn fat_lto(
     // of now.
     module.module_llvm.temp_dir = Some(tmp_path);
 
-    Ok(LtoModuleCodegen::Fat { module, _serialized_bitcode: serialized_bitcode })
+    Ok(LtoModuleCodegen::Fat(module))
 }
 
 pub struct ModuleBuffer(PathBuf);
diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs
index aa6842c75cec7..09896b89ebf42 100644
--- a/compiler/rustc_codegen_llvm/src/back/lto.rs
+++ b/compiler/rustc_codegen_llvm/src/back/lto.rs
@@ -314,7 +314,6 @@ fn fat_lto(
             }
         }
     };
-    let mut serialized_bitcode = Vec::new();
     {
         let (llcx, llmod) = {
             let llvm = &module.module_llvm;
@@ -342,9 +341,7 @@ fn fat_lto(
         serialized_modules.sort_by(|module1, module2| module1.1.cmp(&module2.1));
 
         // For all serialized bitcode files we parse them and link them in as we did
-        // above, this is all mostly handled in C++. Like above, though, we don't
-        // know much about the memory management here so we err on the side of being
-        // save and persist everything with the original module.
+        // above, this is all mostly handled in C++.
         let mut linker = Linker::new(llmod);
         for (bc_decoded, name) in serialized_modules {
             let _timer = cgcx
@@ -355,7 +352,6 @@ fn fat_lto(
             info!("linking {:?}", name);
             let data = bc_decoded.data();
             linker.add(data).map_err(|()| write::llvm_err(dcx, LlvmError::LoadBitcode { name }))?;
-            serialized_bitcode.push(bc_decoded);
         }
         drop(linker);
         save_temp_bitcode(cgcx, &module, "lto.input");
@@ -372,7 +368,7 @@ fn fat_lto(
         }
     }
 
-    Ok(LtoModuleCodegen::Fat { module, _serialized_bitcode: serialized_bitcode })
+    Ok(LtoModuleCodegen::Fat(module))
 }
 
 pub(crate) struct Linker<'a>(&'a mut llvm::Linker<'a>);
diff --git a/compiler/rustc_codegen_ssa/src/back/lto.rs b/compiler/rustc_codegen_ssa/src/back/lto.rs
index 8b6f6b5a220a4..1e1e039882be0 100644
--- a/compiler/rustc_codegen_ssa/src/back/lto.rs
+++ b/compiler/rustc_codegen_ssa/src/back/lto.rs
@@ -41,18 +41,14 @@ pub struct ThinShared<B: WriteBackendMethods> {
 }
 
 pub enum LtoModuleCodegen<B: WriteBackendMethods> {
-    Fat {
-        module: ModuleCodegen<B::Module>,
-        _serialized_bitcode: Vec<SerializedModule<B::ModuleBuffer>>,
-    },
-
+    Fat(ModuleCodegen<B::Module>),
     Thin(ThinModule<B>),
 }
 
 impl<B: WriteBackendMethods> LtoModuleCodegen<B> {
     pub fn name(&self) -> &str {
         match *self {
-            LtoModuleCodegen::Fat { .. } => "everything",
+            LtoModuleCodegen::Fat(_) => "everything",
             LtoModuleCodegen::Thin(ref m) => m.name(),
         }
     }
@@ -68,7 +64,7 @@ impl<B: WriteBackendMethods> LtoModuleCodegen<B> {
         cgcx: &CodegenContext<B>,
     ) -> Result<ModuleCodegen<B::Module>, FatalError> {
         match self {
-            LtoModuleCodegen::Fat { mut module, .. } => {
+            LtoModuleCodegen::Fat(mut module) => {
                 B::optimize_fat(cgcx, &mut module)?;
                 Ok(module)
             }
@@ -81,7 +77,7 @@ impl<B: WriteBackendMethods> LtoModuleCodegen<B> {
     pub fn cost(&self) -> u64 {
         match *self {
             // Only one module with fat LTO, so the cost doesn't matter.
-            LtoModuleCodegen::Fat { .. } => 0,
+            LtoModuleCodegen::Fat(_) => 0,
             LtoModuleCodegen::Thin(ref m) => m.cost(),
         }
     }
diff --git a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
index 96b3ec6f18728..7ccebd83f24f7 100644
--- a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
+++ b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
@@ -94,7 +94,7 @@ fn eval_body_using_ecx<'tcx, R: InterpretationResult<'tcx>>(
     let intern_result = intern_const_alloc_recursive(ecx, intern_kind, &ret);
 
     // Since evaluation had no errors, validate the resulting constant.
-    const_validate_mplace(&ecx, &ret, cid)?;
+    const_validate_mplace(ecx, &ret, cid)?;
 
     // Only report this after validation, as validation produces much better diagnostics.
     // FIXME: ensure validation always reports this and stop making interning care about it.
@@ -391,7 +391,7 @@ fn eval_in_interpreter<'tcx, R: InterpretationResult<'tcx>>(
 
 #[inline(always)]
 fn const_validate_mplace<'tcx>(
-    ecx: &InterpCx<'tcx, CompileTimeMachine<'tcx>>,
+    ecx: &mut InterpCx<'tcx, CompileTimeMachine<'tcx>>,
     mplace: &MPlaceTy<'tcx>,
     cid: GlobalId<'tcx>,
 ) -> Result<(), ErrorHandled> {
diff --git a/compiler/rustc_const_eval/src/const_eval/machine.rs b/compiler/rustc_const_eval/src/const_eval/machine.rs
index 9c1fef095f552..7405ca09342da 100644
--- a/compiler/rustc_const_eval/src/const_eval/machine.rs
+++ b/compiler/rustc_const_eval/src/const_eval/machine.rs
@@ -1,16 +1,16 @@
-use std::borrow::Borrow;
+use std::borrow::{Borrow, Cow};
 use std::fmt;
 use std::hash::Hash;
 use std::ops::ControlFlow;
 
 use rustc_ast::Mutability;
-use rustc_data_structures::fx::{FxIndexMap, IndexEntry};
+use rustc_data_structures::fx::{FxHashMap, FxIndexMap, IndexEntry};
 use rustc_hir::def_id::{DefId, LocalDefId};
 use rustc_hir::{self as hir, LangItem, CRATE_HIR_ID};
 use rustc_middle::mir::AssertMessage;
 use rustc_middle::query::TyCtxtAt;
 use rustc_middle::ty::layout::{FnAbiOf, TyAndLayout};
-use rustc_middle::ty::{self, TyCtxt};
+use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_middle::{bug, mir};
 use rustc_span::symbol::{sym, Symbol};
 use rustc_span::Span;
@@ -24,8 +24,8 @@ use crate::fluent_generated as fluent;
 use crate::interpret::{
     self, compile_time_machine, err_ub, throw_exhaust, throw_inval, throw_ub_custom, throw_unsup,
     throw_unsup_format, AllocId, AllocRange, ConstAllocation, CtfeProvenance, FnArg, Frame,
-    GlobalAlloc, ImmTy, InterpCx, InterpResult, MPlaceTy, OpTy, Pointer, PointerArithmetic, Scalar,
-    StackPopCleanup,
+    GlobalAlloc, ImmTy, InterpCx, InterpResult, MPlaceTy, OpTy, Pointer, PointerArithmetic,
+    RangeSet, Scalar, StackPopCleanup,
 };
 
 /// When hitting this many interpreted terminators we emit a deny by default lint
@@ -65,6 +65,9 @@ pub struct CompileTimeMachine<'tcx> {
     /// storing the result in the given `AllocId`.
     /// Used to prevent reads from a static's base allocation, as that may allow for self-initialization loops.
     pub(crate) static_root_ids: Option<(AllocId, LocalDefId)>,
+
+    /// A cache of "data range" computations for unions (i.e., the offsets of non-padding bytes).
+    union_data_ranges: FxHashMap<Ty<'tcx>, RangeSet>,
 }
 
 #[derive(Copy, Clone)]
@@ -99,6 +102,7 @@ impl<'tcx> CompileTimeMachine<'tcx> {
             can_access_mut_global,
             check_alignment,
             static_root_ids: None,
+            union_data_ranges: FxHashMap::default(),
         }
     }
 }
@@ -766,6 +770,19 @@ impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> {
         }
         Ok(())
     }
+
+    fn cached_union_data_range<'e>(
+        ecx: &'e mut InterpCx<'tcx, Self>,
+        ty: Ty<'tcx>,
+        compute_range: impl FnOnce() -> RangeSet,
+    ) -> Cow<'e, RangeSet> {
+        if ecx.tcx.sess.opts.unstable_opts.extra_const_ub_checks {
+            Cow::Borrowed(ecx.machine.union_data_ranges.entry(ty).or_insert_with(compute_range))
+        } else {
+            // Don't bother caching, we're only doing one validation at the end anyway.
+            Cow::Owned(compute_range())
+        }
+    }
 }
 
 // Please do not add any code below the above `Machine` trait impl. I (oli-obk) plan more cleanups
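
The new hook is a compute-or-cache pattern behind `Cow`: borrow from the per-machine map when the extra UB checks make repeated validation likely, otherwise hand back a freshly computed owned value. A minimal standalone sketch of the same shape, with hypothetical names and `HashMap` standing in for `FxHashMap`:

use std::borrow::Cow;
use std::collections::HashMap;
use std::hash::Hash;

// Sketch of the caching shape used by `cached_union_data_range` (hypothetical helper).
fn cached_or_owned<'a, K: Hash + Eq, V: Clone>(
    cache: &'a mut HashMap<K, V>,
    key: K,
    use_cache: bool,
    compute: impl FnOnce() -> V,
) -> Cow<'a, V> {
    if use_cache {
        // Compute at most once per key, then keep borrowing the cached value.
        Cow::Borrowed(cache.entry(key).or_insert_with(compute))
    } else {
        // Caching isn't worth it; return an owned, freshly computed value.
        Cow::Owned(compute())
    }
}
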
diff --git a/compiler/rustc_const_eval/src/interpret/discriminant.rs b/compiler/rustc_const_eval/src/interpret/discriminant.rs
index 0008a15722bde..de93ed85704b5 100644
--- a/compiler/rustc_const_eval/src/interpret/discriminant.rs
+++ b/compiler/rustc_const_eval/src/interpret/discriminant.rs
@@ -7,7 +7,7 @@ use rustc_target::abi::{self, TagEncoding, VariantIdx, Variants};
 use tracing::{instrument, trace};
 
 use super::{
-    err_ub, throw_ub, ImmTy, InterpCx, InterpResult, Machine, Readable, Scalar, Writeable,
+    err_ub, throw_ub, ImmTy, InterpCx, InterpResult, Machine, Projectable, Scalar, Writeable,
 };
 
 impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
@@ -60,7 +60,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
     #[instrument(skip(self), level = "trace")]
     pub fn read_discriminant(
         &self,
-        op: &impl Readable<'tcx, M::Provenance>,
+        op: &impl Projectable<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx, VariantIdx> {
         let ty = op.layout().ty;
         trace!("read_discriminant_value {:#?}", op.layout());
diff --git a/compiler/rustc_const_eval/src/interpret/machine.rs b/compiler/rustc_const_eval/src/interpret/machine.rs
index 6cfd7be48e624..8cab3c34eedfb 100644
--- a/compiler/rustc_const_eval/src/interpret/machine.rs
+++ b/compiler/rustc_const_eval/src/interpret/machine.rs
@@ -10,6 +10,7 @@ use rustc_apfloat::{Float, FloatConvert};
 use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
 use rustc_middle::query::TyCtxtAt;
 use rustc_middle::ty::layout::TyAndLayout;
+use rustc_middle::ty::Ty;
 use rustc_middle::{mir, ty};
 use rustc_span::def_id::DefId;
 use rustc_span::Span;
@@ -19,7 +20,7 @@ use rustc_target::spec::abi::Abi as CallAbi;
 use super::{
     throw_unsup, throw_unsup_format, AllocBytes, AllocId, AllocKind, AllocRange, Allocation,
     ConstAllocation, CtfeProvenance, FnArg, Frame, ImmTy, InterpCx, InterpResult, MPlaceTy,
-    MemoryKind, Misalignment, OpTy, PlaceTy, Pointer, Provenance, CTFE_ALLOC_SALT,
+    MemoryKind, Misalignment, OpTy, PlaceTy, Pointer, Provenance, RangeSet, CTFE_ALLOC_SALT,
 };
 
 /// Data returned by [`Machine::after_stack_pop`], and consumed by
@@ -578,6 +579,15 @@ pub trait Machine<'tcx>: Sized {
         ecx: &InterpCx<'tcx, Self>,
         instance: Option<ty::Instance<'tcx>>,
     ) -> usize;
+
+    fn cached_union_data_range<'e>(
+        _ecx: &'e mut InterpCx<'tcx, Self>,
+        _ty: Ty<'tcx>,
+        compute_range: impl FnOnce() -> RangeSet,
+    ) -> Cow<'e, RangeSet> {
+        // Default to no caching.
+        Cow::Owned(compute_range())
+    }
 }
 
 /// A lot of the flexibility above is just needed for `Miri`, but all "compile-time" machines
diff --git a/compiler/rustc_const_eval/src/interpret/memory.rs b/compiler/rustc_const_eval/src/interpret/memory.rs
index 45a5eb9bd52fc..d87588496c0bd 100644
--- a/compiler/rustc_const_eval/src/interpret/memory.rs
+++ b/compiler/rustc_const_eval/src/interpret/memory.rs
@@ -8,9 +8,8 @@
 
 use std::assert_matches::assert_matches;
 use std::borrow::Cow;
-use std::cell::Cell;
 use std::collections::VecDeque;
-use std::{fmt, ptr};
+use std::{fmt, mem, ptr};
 
 use rustc_ast::Mutability;
 use rustc_data_structures::fx::{FxHashSet, FxIndexMap};
@@ -118,7 +117,7 @@ pub struct Memory<'tcx, M: Machine<'tcx>> {
     /// This stores whether we are currently doing reads purely for the purpose of validation.
     /// Those reads do not trigger the machine's hooks for memory reads.
     /// Needless to say, this must only be set with great care!
-    validation_in_progress: Cell<bool>,
+    validation_in_progress: bool,
 }
 
 /// A reference to some allocation that was already bounds-checked for the given region
@@ -145,7 +144,7 @@ impl<'tcx, M: Machine<'tcx>> Memory<'tcx, M> {
             alloc_map: M::MemoryMap::default(),
             extra_fn_ptr_map: FxIndexMap::default(),
             dead_alloc_map: FxIndexMap::default(),
-            validation_in_progress: Cell::new(false),
+            validation_in_progress: false,
         }
     }
 
@@ -682,7 +681,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         // We want to call the hook on *all* accesses that involve an AllocId, including zero-sized
         // accesses. That means we cannot rely on the closure above or the `Some` branch below. We
         // do this after `check_and_deref_ptr` to ensure some basic sanity has already been checked.
-        if !self.memory.validation_in_progress.get() {
+        if !self.memory.validation_in_progress {
             if let Ok((alloc_id, ..)) = self.ptr_try_get_alloc_id(ptr, size_i64) {
                 M::before_alloc_read(self, alloc_id)?;
             }
@@ -690,7 +689,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
 
         if let Some((alloc_id, offset, prov, alloc)) = ptr_and_alloc {
             let range = alloc_range(offset, size);
-            if !self.memory.validation_in_progress.get() {
+            if !self.memory.validation_in_progress {
                 M::before_memory_read(
                     self.tcx,
                     &self.machine,
@@ -766,11 +765,14 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         let parts = self.get_ptr_access(ptr, size)?;
         if let Some((alloc_id, offset, prov)) = parts {
             let tcx = self.tcx;
+            let validation_in_progress = self.memory.validation_in_progress;
             // FIXME: can we somehow avoid looking up the allocation twice here?
             // We cannot call `get_raw_mut` inside `check_and_deref_ptr` as that would duplicate `&mut self`.
             let (alloc, machine) = self.get_alloc_raw_mut(alloc_id)?;
             let range = alloc_range(offset, size);
-            M::before_memory_write(tcx, machine, &mut alloc.extra, (alloc_id, prov), range)?;
+            if !validation_in_progress {
+                M::before_memory_write(tcx, machine, &mut alloc.extra, (alloc_id, prov), range)?;
+            }
             Ok(Some(AllocRefMut { alloc, range, tcx: *tcx, alloc_id }))
         } else {
             Ok(None)
@@ -1014,16 +1016,16 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
     ///
     /// We do this so Miri's allocation access tracking does not show the validation
     /// reads as spurious accesses.
-    pub fn run_for_validation<R>(&self, f: impl FnOnce() -> R) -> R {
+    pub fn run_for_validation<R>(&mut self, f: impl FnOnce(&mut Self) -> R) -> R {
         // This deliberately uses `==` on `bool` to follow the pattern
         // `assert!(val.replace(new) == old)`.
         assert!(
-            self.memory.validation_in_progress.replace(true) == false,
+            mem::replace(&mut self.memory.validation_in_progress, true) == false,
             "`validation_in_progress` was already set"
         );
-        let res = f();
+        let res = f(self);
         assert!(
-            self.memory.validation_in_progress.replace(false) == true,
+            mem::replace(&mut self.memory.validation_in_progress, false) == true,
             "`validation_in_progress` was unset by someone else"
         );
         res
@@ -1115,6 +1117,10 @@ impl<'a, 'tcx, M: Machine<'tcx>> std::fmt::Debug for DumpAllocs<'a, 'tcx, M> {
 impl<'tcx, 'a, Prov: Provenance, Extra, Bytes: AllocBytes>
     AllocRefMut<'a, 'tcx, Prov, Extra, Bytes>
 {
+    pub fn as_ref<'b>(&'b self) -> AllocRef<'b, 'tcx, Prov, Extra, Bytes> {
+        AllocRef { alloc: self.alloc, range: self.range, tcx: self.tcx, alloc_id: self.alloc_id }
+    }
+
     /// `range` is relative to this allocation reference, not the base of the allocation.
     pub fn write_scalar(&mut self, range: AllocRange, val: Scalar<Prov>) -> InterpResult<'tcx> {
         let range = self.range.subrange(range);
@@ -1130,13 +1136,30 @@ impl<'tcx, 'a, Prov: Provenance, Extra, Bytes: AllocBytes>
         self.write_scalar(alloc_range(offset, self.tcx.data_layout().pointer_size), val)
     }
 
+    /// Mark the given sub-range (relative to this allocation reference) as uninitialized.
+    pub fn write_uninit(&mut self, range: AllocRange) -> InterpResult<'tcx> {
+        let range = self.range.subrange(range);
+        Ok(self
+            .alloc
+            .write_uninit(&self.tcx, range)
+            .map_err(|e| e.to_interp_error(self.alloc_id))?)
+    }
+
     /// Mark the entire referenced range as uninitialized
-    pub fn write_uninit(&mut self) -> InterpResult<'tcx> {
+    pub fn write_uninit_full(&mut self) -> InterpResult<'tcx> {
         Ok(self
             .alloc
             .write_uninit(&self.tcx, self.range)
             .map_err(|e| e.to_interp_error(self.alloc_id))?)
     }
+
+    /// Remove all provenance in the referenced range.
+    pub fn clear_provenance(&mut self) -> InterpResult<'tcx> {
+        Ok(self
+            .alloc
+            .clear_provenance(&self.tcx, self.range)
+            .map_err(|e| e.to_interp_error(self.alloc_id))?)
+    }
 }
 
 impl<'tcx, 'a, Prov: Provenance, Extra, Bytes: AllocBytes> AllocRef<'a, 'tcx, Prov, Extra, Bytes> {
@@ -1278,7 +1301,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         };
         let src_alloc = self.get_alloc_raw(src_alloc_id)?;
         let src_range = alloc_range(src_offset, size);
-        assert!(!self.memory.validation_in_progress.get(), "we can't be copying during validation");
+        assert!(!self.memory.validation_in_progress, "we can't be copying during validation");
         M::before_memory_read(
             tcx,
             &self.machine,
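
Editor's sketch of the signature change above: with `validation_in_progress` demoted from `Cell<bool>` to a plain `bool`, the guard must take `&mut self` and hand the context back to the closure instead of letting it capture `&self`. A reduced model with a hypothetical `Ctx` type:

// Reduced model of the new run_for_validation shape (hypothetical Ctx type).
struct Ctx {
    validation_in_progress: bool,
}

impl Ctx {
    fn run_for_validation<R>(&mut self, f: impl FnOnce(&mut Self) -> R) -> R {
        // Flip the flag with mem::replace, asserting it was not already set.
        assert!(!std::mem::replace(&mut self.validation_in_progress, true));
        let res = f(self); // the closure re-borrows the context mutably
        assert!(std::mem::replace(&mut self.validation_in_progress, false));
        res
    }
}

// Call sites change from `ecx.run_for_validation(|| ...)` to
// `ecx.run_for_validation(|ecx| ...)`.
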
diff --git a/compiler/rustc_const_eval/src/interpret/mod.rs b/compiler/rustc_const_eval/src/interpret/mod.rs
index 511756e3f86c9..561d681f804a1 100644
--- a/compiler/rustc_const_eval/src/interpret/mod.rs
+++ b/compiler/rustc_const_eval/src/interpret/mod.rs
@@ -33,11 +33,11 @@ pub(crate) use self::intrinsics::eval_nullary_intrinsic;
 pub use self::machine::{compile_time_machine, AllocMap, Machine, MayLeak, ReturnAction};
 pub use self::memory::{AllocKind, AllocRef, AllocRefMut, FnVal, Memory, MemoryKind};
 use self::operand::Operand;
-pub use self::operand::{ImmTy, Immediate, OpTy, Readable};
+pub use self::operand::{ImmTy, Immediate, OpTy};
 pub use self::place::{MPlaceTy, MemPlaceMeta, PlaceTy, Writeable};
 use self::place::{MemPlace, Place};
 pub use self::projection::{OffsetMode, Projectable};
 pub use self::stack::{Frame, FrameInfo, LocalState, StackPopCleanup, StackPopInfo};
 pub(crate) use self::util::create_static_alloc;
-pub use self::validity::{CtfeValidationMode, RefTracking};
+pub use self::validity::{CtfeValidationMode, RangeSet, RefTracking};
 pub use self::visitor::ValueVisitor;
diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs
index 9a8ccaa7cc5ca..b906e3422dba5 100644
--- a/compiler/rustc_const_eval/src/interpret/operand.rs
+++ b/compiler/rustc_const_eval/src/interpret/operand.rs
@@ -111,6 +111,46 @@ impl<Prov: Provenance> Immediate<Prov> {
             Immediate::Uninit => bug!("Got uninit where a scalar or scalar pair was expected"),
         }
     }
+
+    /// Assert that this immediate is a valid value for the given ABI.
+    pub fn assert_matches_abi(self, abi: Abi, cx: &impl HasDataLayout) {
+        match (self, abi) {
+            (Immediate::Scalar(scalar), Abi::Scalar(s)) => {
+                assert_eq!(scalar.size(), s.size(cx));
+                if !matches!(s.primitive(), abi::Pointer(..)) {
+                    assert!(matches!(scalar, Scalar::Int(..)));
+                }
+            }
+            (Immediate::ScalarPair(a_val, b_val), Abi::ScalarPair(a, b)) => {
+                assert_eq!(a_val.size(), a.size(cx));
+                if !matches!(a.primitive(), abi::Pointer(..)) {
+                    assert!(matches!(a_val, Scalar::Int(..)));
+                }
+                assert_eq!(b_val.size(), b.size(cx));
+                if !matches!(b.primitive(), abi::Pointer(..)) {
+                    assert!(matches!(b_val, Scalar::Int(..)));
+                }
+            }
+            (Immediate::Uninit, _) => {}
+            _ => {
+                bug!("value {self:?} does not match ABI {abi:?}")
+            }
+        }
+    }
+
+    pub fn clear_provenance<'tcx>(&mut self) -> InterpResult<'tcx> {
+        match self {
+            Immediate::Scalar(s) => {
+                s.clear_provenance()?;
+            }
+            Immediate::ScalarPair(a, b) => {
+                a.clear_provenance()?;
+                b.clear_provenance()?;
+            }
+            Immediate::Uninit => {}
+        }
+        Ok(())
+    }
 }
 
 // ScalarPair needs a type to interpret, so we often have an immediate and a type together
@@ -490,32 +530,6 @@ impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for OpTy<'tcx, Prov> {
     }
 }
 
-/// The `Readable` trait describes interpreter values that one can read from.
-pub trait Readable<'tcx, Prov: Provenance>: Projectable<'tcx, Prov> {
-    fn as_mplace_or_imm(&self) -> Either<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>>;
-}
-
-impl<'tcx, Prov: Provenance> Readable<'tcx, Prov> for OpTy<'tcx, Prov> {
-    #[inline(always)]
-    fn as_mplace_or_imm(&self) -> Either<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>> {
-        self.as_mplace_or_imm()
-    }
-}
-
-impl<'tcx, Prov: Provenance> Readable<'tcx, Prov> for MPlaceTy<'tcx, Prov> {
-    #[inline(always)]
-    fn as_mplace_or_imm(&self) -> Either<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>> {
-        Left(self.clone())
-    }
-}
-
-impl<'tcx, Prov: Provenance> Readable<'tcx, Prov> for ImmTy<'tcx, Prov> {
-    #[inline(always)]
-    fn as_mplace_or_imm(&self) -> Either<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>> {
-        Right(self.clone())
-    }
-}
-
 impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
     /// Try reading an immediate in memory; this is interesting particularly for `ScalarPair`.
     /// Returns `None` if the layout does not permit loading this as a value.
@@ -588,9 +602,9 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
     /// ConstProp needs it, though.
     pub fn read_immediate_raw(
         &self,
-        src: &impl Readable<'tcx, M::Provenance>,
+        src: &impl Projectable<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx, Either<MPlaceTy<'tcx, M::Provenance>, ImmTy<'tcx, M::Provenance>>> {
-        Ok(match src.as_mplace_or_imm() {
+        Ok(match src.to_op(self)?.as_mplace_or_imm() {
             Left(ref mplace) => {
                 if let Some(val) = self.read_immediate_from_mplace_raw(mplace)? {
                     Right(val)
@@ -608,7 +622,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
     #[inline(always)]
     pub fn read_immediate(
         &self,
-        op: &impl Readable<'tcx, M::Provenance>,
+        op: &impl Projectable<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
         if !matches!(
             op.layout().abi,
@@ -627,7 +641,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
     /// Read a scalar from a place
     pub fn read_scalar(
         &self,
-        op: &impl Readable<'tcx, M::Provenance>,
+        op: &impl Projectable<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
         Ok(self.read_immediate(op)?.to_scalar())
     }
@@ -638,21 +652,21 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
     /// Read a pointer from a place.
     pub fn read_pointer(
         &self,
-        op: &impl Readable<'tcx, M::Provenance>,
+        op: &impl Projectable<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx, Pointer<Option<M::Provenance>>> {
         self.read_scalar(op)?.to_pointer(self)
     }
     /// Read a pointer-sized unsigned integer from a place.
     pub fn read_target_usize(
         &self,
-        op: &impl Readable<'tcx, M::Provenance>,
+        op: &impl Projectable<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx, u64> {
         self.read_scalar(op)?.to_target_usize(self)
     }
     /// Read a pointer-sized signed integer from a place.
     pub fn read_target_isize(
         &self,
-        op: &impl Readable<'tcx, M::Provenance>,
+        op: &impl Projectable<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx, i64> {
         self.read_scalar(op)?.to_target_isize(self)
     }
@@ -717,7 +731,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
     ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
         match place.as_mplace_or_local() {
             Left(mplace) => Ok(mplace.into()),
-            Right((local, offset, locals_addr)) => {
+            Right((local, offset, locals_addr, _)) => {
                 debug_assert!(place.layout.is_sized()); // only sized locals can ever be `Place::Local`.
                 debug_assert_eq!(locals_addr, self.frame().locals_addr());
                 let base = self.local_to_op(local, None)?;
diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs
index 840f7986c6e0a..3b14142da02ed 100644
--- a/compiler/rustc_const_eval/src/interpret/place.rs
+++ b/compiler/rustc_const_eval/src/interpret/place.rs
@@ -15,7 +15,7 @@ use tracing::{instrument, trace};
 use super::{
     alloc_range, mir_assign_valid_types, AllocRef, AllocRefMut, CheckAlignMsg, CtfeProvenance,
     ImmTy, Immediate, InterpCx, InterpResult, Machine, MemoryKind, Misalignment, OffsetMode, OpTy,
-    Operand, Pointer, Projectable, Provenance, Readable, Scalar,
+    Operand, Pointer, Projectable, Provenance, Scalar,
 };
 
 #[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
@@ -180,7 +180,8 @@ pub(super) enum Place<Prov: Provenance = CtfeProvenance> {
     Ptr(MemPlace<Prov>),
 
     /// To support alloc-free locals, we are able to write directly to a local. The offset indicates
-    /// where in the local this place is located; if it is `None`, no projection has been applied.
+    /// where in the local this place is located; if it is `None`, no projection has been applied
+    /// and the type of the place is exactly the type of the local.
     /// Such projections are meaningful even if the offset is 0, since they can change layouts.
     /// (Without that optimization, we'd just always be a `MemPlace`.)
     /// `Local` places always refer to the current stack frame, so they are unstable under
@@ -231,10 +232,12 @@ impl<'tcx, Prov: Provenance> PlaceTy<'tcx, Prov> {
     #[inline(always)]
     pub fn as_mplace_or_local(
         &self,
-    ) -> Either<MPlaceTy<'tcx, Prov>, (mir::Local, Option<Size>, usize)> {
+    ) -> Either<MPlaceTy<'tcx, Prov>, (mir::Local, Option<Size>, usize, TyAndLayout<'tcx>)> {
         match self.place {
             Place::Ptr(mplace) => Left(MPlaceTy { mplace, layout: self.layout }),
-            Place::Local { local, offset, locals_addr } => Right((local, offset, locals_addr)),
+            Place::Local { local, offset, locals_addr } => {
+                Right((local, offset, locals_addr, self.layout))
+            }
         }
     }
 
@@ -277,7 +280,7 @@ impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for PlaceTy<'tcx, Prov> {
     ) -> InterpResult<'tcx, Self> {
         Ok(match self.as_mplace_or_local() {
             Left(mplace) => mplace.offset_with_meta(offset, mode, meta, layout, ecx)?.into(),
-            Right((local, old_offset, locals_addr)) => {
+            Right((local, old_offset, locals_addr, _)) => {
                 debug_assert!(layout.is_sized(), "unsized locals should live in memory");
                 assert_matches!(meta, MemPlaceMeta::None); // we couldn't store it anyway...
                 // `Place::Local` are always in-bounds of their surrounding local, so we can just
@@ -328,9 +331,7 @@ impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
 
 /// The `Writeable` trait describes interpreter values that can be written to.
 pub trait Writeable<'tcx, Prov: Provenance>: Projectable<'tcx, Prov> {
-    fn as_mplace_or_local(
-        &self,
-    ) -> Either<MPlaceTy<'tcx, Prov>, (mir::Local, Option<Size>, usize, TyAndLayout<'tcx>)>;
+    fn to_place(&self) -> PlaceTy<'tcx, Prov>;
 
     fn force_mplace<M: Machine<'tcx, Provenance = Prov>>(
         &self,
@@ -340,11 +341,8 @@ pub trait Writeable<'tcx, Prov: Provenance>: Projectable<'tcx, Prov> {
 
 impl<'tcx, Prov: Provenance> Writeable<'tcx, Prov> for PlaceTy<'tcx, Prov> {
     #[inline(always)]
-    fn as_mplace_or_local(
-        &self,
-    ) -> Either<MPlaceTy<'tcx, Prov>, (mir::Local, Option<Size>, usize, TyAndLayout<'tcx>)> {
-        self.as_mplace_or_local()
-            .map_right(|(local, offset, locals_addr)| (local, offset, locals_addr, self.layout))
+    fn to_place(&self) -> PlaceTy<'tcx, Prov> {
+        self.clone()
     }
 
     #[inline(always)]
@@ -358,10 +356,8 @@ impl<'tcx, Prov: Provenance> Writeable<'tcx, Prov> for PlaceTy<'tcx, Prov> {
 
 impl<'tcx, Prov: Provenance> Writeable<'tcx, Prov> for MPlaceTy<'tcx, Prov> {
     #[inline(always)]
-    fn as_mplace_or_local(
-        &self,
-    ) -> Either<MPlaceTy<'tcx, Prov>, (mir::Local, Option<Size>, usize, TyAndLayout<'tcx>)> {
-        Left(self.clone())
+    fn to_place(&self) -> PlaceTy<'tcx, Prov> {
+        self.clone().into()
     }
 
     #[inline(always)]
@@ -436,7 +432,7 @@ where
     #[instrument(skip(self), level = "trace")]
     pub fn deref_pointer(
         &self,
-        src: &impl Readable<'tcx, M::Provenance>,
+        src: &impl Projectable<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
         if src.layout().ty.is_box() {
             // Derefer should have removed all Box derefs.
@@ -562,6 +558,40 @@ where
         Ok(place)
     }
 
+    /// Given a place, returns either the underlying mplace or a reference to where the value of
+    /// this place is stored.
+    fn as_mplace_or_mutable_local(
+        &mut self,
+        place: &PlaceTy<'tcx, M::Provenance>,
+    ) -> InterpResult<
+        'tcx,
+        Either<MPlaceTy<'tcx, M::Provenance>, (&mut Immediate<M::Provenance>, TyAndLayout<'tcx>)>,
+    > {
+        Ok(match place.to_place().as_mplace_or_local() {
+            Left(mplace) => Left(mplace),
+            Right((local, offset, locals_addr, layout)) => {
+                if offset.is_some() {
+                    // This has been projected to a part of this local, or had the type changed.
+                    // FIXME: there are cases where we could still avoid allocating an mplace.
+                    Left(place.force_mplace(self)?)
+                } else {
+                    debug_assert_eq!(locals_addr, self.frame().locals_addr());
+                    debug_assert_eq!(self.layout_of_local(self.frame(), local, None)?, layout);
+                    match self.frame_mut().locals[local].access_mut()? {
+                        Operand::Indirect(mplace) => {
+                            // The local is in memory.
+                            Left(MPlaceTy { mplace: *mplace, layout })
+                        }
+                        Operand::Immediate(local_val) => {
+                            // The local still has the optimized representation.
+                            Right((local_val, layout))
+                        }
+                    }
+                }
+            }
+        })
+    }
+
     /// Write an immediate to a place
     #[inline(always)]
     #[instrument(skip(self), level = "trace")]
@@ -574,9 +604,11 @@ where
 
         if M::enforce_validity(self, dest.layout()) {
             // Data got changed, better make sure it matches the type!
+            // Also needed to reset padding.
             self.validate_operand(
-                &dest.to_op(self)?,
+                &dest.to_place(),
                 M::enforce_validity_recursively(self, dest.layout()),
+                /*reset_provenance_and_padding*/ true,
             )?;
         }
 
@@ -606,67 +638,27 @@ where
     /// Write an immediate to a place.
     /// If you use this you are responsible for validating that things got copied at the
     /// right type.
-    fn write_immediate_no_validate(
+    pub(super) fn write_immediate_no_validate(
         &mut self,
         src: Immediate<M::Provenance>,
         dest: &impl Writeable<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx> {
         assert!(dest.layout().is_sized(), "Cannot write unsized immediate data");
 
-        // See if we can avoid an allocation. This is the counterpart to `read_immediate_raw`,
-        // but not factored as a separate function.
-        let mplace = match dest.as_mplace_or_local() {
-            Right((local, offset, locals_addr, layout)) => {
-                if offset.is_some() {
-                    // This has been projected to a part of this local. We could have complicated
-                    // logic to still keep this local as an `Operand`... but it's much easier to
-                    // just fall back to the indirect path.
-                    dest.force_mplace(self)?
-                } else {
-                    debug_assert_eq!(locals_addr, self.frame().locals_addr());
-                    match self.frame_mut().locals[local].access_mut()? {
-                        Operand::Immediate(local_val) => {
-                            // Local can be updated in-place.
-                            *local_val = src;
-                            // Double-check that the value we are storing and the local fit to each other.
-                            // (*After* doing the update for borrow checker reasons.)
-                            if cfg!(debug_assertions) {
-                                let local_layout =
-                                    self.layout_of_local(&self.frame(), local, None)?;
-                                match (src, local_layout.abi) {
-                                    (Immediate::Scalar(scalar), Abi::Scalar(s)) => {
-                                        assert_eq!(scalar.size(), s.size(self))
-                                    }
-                                    (
-                                        Immediate::ScalarPair(a_val, b_val),
-                                        Abi::ScalarPair(a, b),
-                                    ) => {
-                                        assert_eq!(a_val.size(), a.size(self));
-                                        assert_eq!(b_val.size(), b.size(self));
-                                    }
-                                    (Immediate::Uninit, _) => {}
-                                    (src, abi) => {
-                                        bug!(
-                                            "value {src:?} cannot be written into local with type {} (ABI {abi:?})",
-                                            local_layout.ty
-                                        )
-                                    }
-                                };
-                            }
-                            return Ok(());
-                        }
-                        Operand::Indirect(mplace) => {
-                            // The local is in memory, go on below.
-                            MPlaceTy { mplace: *mplace, layout }
-                        }
-                    }
+        match self.as_mplace_or_mutable_local(&dest.to_place())? {
+            Right((local_val, local_layout)) => {
+                // Local can be updated in-place.
+                *local_val = src;
+                // Double-check that the value we are storing and the local fit to each other.
+                if cfg!(debug_assertions) {
+                    src.assert_matches_abi(local_layout.abi, self);
                 }
             }
-            Left(mplace) => mplace, // already referring to memory
-        };
-
-        // This is already in memory, write there.
-        self.write_immediate_to_mplace_no_validate(src, mplace.layout, mplace.mplace)
+            Left(mplace) => {
+                self.write_immediate_to_mplace_no_validate(src, mplace.layout, mplace.mplace)?;
+            }
+        }
+        Ok(())
     }
 
     /// Write an immediate to memory.
@@ -678,6 +670,9 @@ where
         layout: TyAndLayout<'tcx>,
         dest: MemPlace<M::Provenance>,
     ) -> InterpResult<'tcx> {
+        if cfg!(debug_assertions) {
+            value.assert_matches_abi(layout.abi, self);
+        }
         // Note that it is really important that the type here is the right one, and matches the
         // type things are read at. In case `value` is a `ScalarPair`, we don't do any magic here
         // to handle padding properly, which is only correct if we never look at this data with the
@@ -691,15 +686,7 @@ where
 
         match value {
             Immediate::Scalar(scalar) => {
-                let Abi::Scalar(s) = layout.abi else {
-                    span_bug!(
-                        self.cur_span(),
-                        "write_immediate_to_mplace: invalid Scalar layout: {layout:#?}",
-                    )
-                };
-                let size = s.size(&tcx);
-                assert_eq!(size, layout.size, "abi::Scalar size does not match layout size");
-                alloc.write_scalar(alloc_range(Size::ZERO, size), scalar)
+                alloc.write_scalar(alloc_range(Size::ZERO, scalar.size()), scalar)
             }
             Immediate::ScalarPair(a_val, b_val) => {
                 let Abi::ScalarPair(a, b) = layout.abi else {
@@ -709,18 +696,19 @@ where
                         layout
                     )
                 };
-                let (a_size, b_size) = (a.size(&tcx), b.size(&tcx));
-                let b_offset = a_size.align_to(b.align(&tcx).abi);
+                let b_offset = a.size(&tcx).align_to(b.align(&tcx).abi);
                 assert!(b_offset.bytes() > 0); // in `operand_field` we use the offset to tell apart the fields
 
                 // It is tempting to verify `b_offset` against `layout.fields.offset(1)`,
                 // but that does not work: We could be a newtype around a pair, then the
                 // fields do not match the `ScalarPair` components.
 
-                alloc.write_scalar(alloc_range(Size::ZERO, a_size), a_val)?;
-                alloc.write_scalar(alloc_range(b_offset, b_size), b_val)
+                alloc.write_scalar(alloc_range(Size::ZERO, a_val.size()), a_val)?;
+                alloc.write_scalar(alloc_range(b_offset, b_val.size()), b_val)?;
+                // We don't have to reset padding here; `write_immediate` will do a validation run anyway.
+                Ok(())
             }
-            Immediate::Uninit => alloc.write_uninit(),
+            Immediate::Uninit => alloc.write_uninit_full(),
         }
     }
 
@@ -728,35 +716,38 @@ where
         &mut self,
         dest: &impl Writeable<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx> {
-        let mplace = match dest.as_mplace_or_local() {
-            Left(mplace) => mplace,
-            Right((local, offset, locals_addr, layout)) => {
-                if offset.is_some() {
-                    // This has been projected to a part of this local. We could have complicated
-                    // logic to still keep this local as an `Operand`... but it's much easier to
-                    // just fall back to the indirect path.
-                    // FIXME: share the logic with `write_immediate_no_validate`.
-                    dest.force_mplace(self)?
-                } else {
-                    debug_assert_eq!(locals_addr, self.frame().locals_addr());
-                    match self.frame_mut().locals[local].access_mut()? {
-                        Operand::Immediate(local) => {
-                            *local = Immediate::Uninit;
-                            return Ok(());
-                        }
-                        Operand::Indirect(mplace) => {
-                            // The local is in memory, go on below.
-                            MPlaceTy { mplace: *mplace, layout }
-                        }
-                    }
-                }
+        match self.as_mplace_or_mutable_local(&dest.to_place())? {
+            Right((local_val, _local_layout)) => {
+                *local_val = Immediate::Uninit;
             }
-        };
-        let Some(mut alloc) = self.get_place_alloc_mut(&mplace)? else {
-            // Zero-sized access
-            return Ok(());
-        };
-        alloc.write_uninit()?;
+            Left(mplace) => {
+                let Some(mut alloc) = self.get_place_alloc_mut(&mplace)? else {
+                    // Zero-sized access
+                    return Ok(());
+                };
+                alloc.write_uninit_full()?;
+            }
+        }
+        Ok(())
+    }
+
+    /// Remove all provenance in the given place.
+    pub fn clear_provenance(
+        &mut self,
+        dest: &impl Writeable<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx> {
+        match self.as_mplace_or_mutable_local(&dest.to_place())? {
+            Right((local_val, _local_layout)) => {
+                local_val.clear_provenance()?;
+            }
+            Left(mplace) => {
+                let Some(mut alloc) = self.get_place_alloc_mut(&mplace)? else {
+                    // Zero-sized access
+                    return Ok(());
+                };
+                alloc.clear_provenance()?;
+            }
+        }
         Ok(())
     }
 
@@ -768,7 +759,7 @@ where
     #[inline(always)]
     pub(super) fn copy_op_no_dest_validation(
         &mut self,
-        src: &impl Readable<'tcx, M::Provenance>,
+        src: &impl Projectable<'tcx, M::Provenance>,
         dest: &impl Writeable<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx> {
         self.copy_op_inner(
@@ -781,7 +772,7 @@ where
     #[inline(always)]
     pub fn copy_op_allow_transmute(
         &mut self,
-        src: &impl Readable<'tcx, M::Provenance>,
+        src: &impl Projectable<'tcx, M::Provenance>,
         dest: &impl Writeable<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx> {
         self.copy_op_inner(
@@ -794,7 +785,7 @@ where
     #[inline(always)]
     pub fn copy_op(
         &mut self,
-        src: &impl Readable<'tcx, M::Provenance>,
+        src: &impl Projectable<'tcx, M::Provenance>,
         dest: &impl Writeable<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx> {
         self.copy_op_inner(
@@ -808,28 +799,35 @@ where
     #[instrument(skip(self), level = "trace")]
     fn copy_op_inner(
         &mut self,
-        src: &impl Readable<'tcx, M::Provenance>,
+        src: &impl Projectable<'tcx, M::Provenance>,
         dest: &impl Writeable<'tcx, M::Provenance>,
         allow_transmute: bool,
         validate_dest: bool,
     ) -> InterpResult<'tcx> {
-        // Generally for transmutation, data must be valid both at the old and new type.
-        // But if the types are the same, the 2nd validation below suffices.
-        if src.layout().ty != dest.layout().ty && M::enforce_validity(self, src.layout()) {
-            self.validate_operand(
-                &src.to_op(self)?,
-                M::enforce_validity_recursively(self, src.layout()),
-            )?;
-        }
+        // These are technically *two* typed copies: `src` is a not-yet-loaded value,
+        // so we're doing a typed copy at `src` type from there to some intermediate storage.
+        // And then we're doing a second typed copy from that intermediate storage to `dest`.
+        // But as an optimization, we only make a single direct copy here.
 
         // Do the actual copy.
         self.copy_op_no_validate(src, dest, allow_transmute)?;
 
         if validate_dest && M::enforce_validity(self, dest.layout()) {
-            // Data got changed, better make sure it matches the type!
+            let dest = dest.to_place();
+            // Given that there were two typed copies, we have to ensure this is valid at both types,
+            // and we have to ensure this loses provenance and padding according to both types.
+            // But if the types are identical, we only do one pass.
+            if allow_transmute && src.layout().ty != dest.layout().ty {
+                self.validate_operand(
+                    &dest.transmute(src.layout(), self)?,
+                    M::enforce_validity_recursively(self, src.layout()),
+                    /*reset_provenance_and_padding*/ true,
+                )?;
+            }
             self.validate_operand(
-                &dest.to_op(self)?,
+                &dest,
                 M::enforce_validity_recursively(self, dest.layout()),
+                /*reset_provenance_and_padding*/ true,
             )?;
         }
 
@@ -843,7 +841,7 @@ where
     #[instrument(skip(self), level = "trace")]
     fn copy_op_no_validate(
         &mut self,
-        src: &impl Readable<'tcx, M::Provenance>,
+        src: &impl Projectable<'tcx, M::Provenance>,
         dest: &impl Writeable<'tcx, M::Provenance>,
         allow_transmute: bool,
     ) -> InterpResult<'tcx> {
diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs
index 26b7251f6dbc5..fb24f983ca9c3 100644
--- a/compiler/rustc_const_eval/src/interpret/validity.rs
+++ b/compiler/rustc_const_eval/src/interpret/validity.rs
@@ -4,6 +4,7 @@
 //! That's useful because it means other passes (e.g. promotion) can rely on `const`s
 //! to be const-safe.
 
+use std::borrow::Cow;
 use std::fmt::Write;
 use std::hash::Hash;
 use std::num::NonZero;
@@ -16,22 +17,22 @@ use rustc_hir as hir;
 use rustc_middle::bug;
 use rustc_middle::mir::interpret::ValidationErrorKind::{self, *};
 use rustc_middle::mir::interpret::{
-    ExpectedKind, InterpError, InvalidMetaKind, Misalignment, PointerKind, Provenance,
+    alloc_range, ExpectedKind, InterpError, InvalidMetaKind, Misalignment, PointerKind, Provenance,
     UnsupportedOpInfo, ValidationErrorInfo,
 };
-use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
-use rustc_middle::ty::{self, Ty};
+use rustc_middle::ty::layout::{LayoutCx, LayoutOf, TyAndLayout};
+use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_span::symbol::{sym, Symbol};
 use rustc_target::abi::{
-    Abi, FieldIdx, Scalar as ScalarAbi, Size, VariantIdx, Variants, WrappingRange,
+    Abi, FieldIdx, FieldsShape, Scalar as ScalarAbi, Size, VariantIdx, Variants, WrappingRange,
 };
 use tracing::trace;
 
 use super::machine::AllocMap;
 use super::{
     err_ub, format_interp_error, throw_ub, AllocId, AllocKind, CheckInAllocMsg, GlobalAlloc, ImmTy,
-    Immediate, InterpCx, InterpResult, MPlaceTy, Machine, MemPlaceMeta, OpTy, Pointer, Projectable,
-    Scalar, ValueVisitor,
+    Immediate, InterpCx, InterpResult, MPlaceTy, Machine, MemPlaceMeta, PlaceTy, Pointer,
+    Projectable, Scalar, ValueVisitor,
 };
 
 // for the validation errors
@@ -125,6 +126,7 @@ pub enum PathElem {
     EnumTag,
     CoroutineTag,
     DynDowncast,
+    Vtable,
 }
 
 /// Extra things to check for during validation of CTFE results.
@@ -163,22 +165,22 @@ impl<T: Clone + Eq + Hash + std::fmt::Debug, PATH: Default> RefTracking<T, PATH>
     pub fn empty() -> Self {
         RefTracking { seen: FxHashSet::default(), todo: vec![] }
     }
-    pub fn new(op: T) -> Self {
+    pub fn new(val: T) -> Self {
         let mut ref_tracking_for_consts =
-            RefTracking { seen: FxHashSet::default(), todo: vec![(op.clone(), PATH::default())] };
-        ref_tracking_for_consts.seen.insert(op);
+            RefTracking { seen: FxHashSet::default(), todo: vec![(val.clone(), PATH::default())] };
+        ref_tracking_for_consts.seen.insert(val);
         ref_tracking_for_consts
     }
     pub fn next(&mut self) -> Option<(T, PATH)> {
         self.todo.pop()
     }
 
-    fn track(&mut self, op: T, path: impl FnOnce() -> PATH) {
-        if self.seen.insert(op.clone()) {
-            trace!("Recursing below ptr {:#?}", op);
+    fn track(&mut self, val: T, path: impl FnOnce() -> PATH) {
+        if self.seen.insert(val.clone()) {
+            trace!("Recursing below ptr {:#?}", val);
             let path = path();
             // Remember to come back to this later.
-            self.todo.push((op, path));
+            self.todo.push((val, path));
         }
     }
 }
@@ -204,11 +206,62 @@ fn write_path(out: &mut String, path: &[PathElem]) {
             // not the root.
             Deref => write!(out, ".<deref>"),
             DynDowncast => write!(out, ".<dyn-downcast>"),
+            Vtable => write!(out, ".<vtable>"),
         }
         .unwrap()
     }
 }
 
+/// Represents a set of `Size` values as a sorted list of ranges.
+// These are (offset, length) pairs, and they are sorted and mutually disjoint,
+// and never adjacent (i.e. there's always a gap between two of them).
+#[derive(Debug, Clone)]
+pub struct RangeSet(Vec<(Size, Size)>);
+
+impl RangeSet {
+    fn add_range(&mut self, offset: Size, size: Size) {
+        if size.bytes() == 0 {
+            // No need to track empty ranges.
+            return;
+        }
+        let v = &mut self.0;
+        // We scan for a partition point where the left partition is all the elements that end
+        // strictly before we start. Those are elements that are too "low" to merge with us.
+        let idx =
+            v.partition_point(|&(other_offset, other_size)| other_offset + other_size < offset);
+        // Now we want to either merge with the first element of the second partition, or insert ourselves before that.
+        if let Some(&(other_offset, other_size)) = v.get(idx)
+            && offset + size >= other_offset
+        {
+            // Their end is >= our start (otherwise it would not be in the 2nd partition) and
+            // our end is >= their start. This means we can merge the ranges.
+            let new_start = other_offset.min(offset);
+            let mut new_end = (other_offset + other_size).max(offset + size);
+            // We grew to the right, so merge with overlapping/adjacent elements.
+            // (We also may have grown to the left, but that can never make us adjacent with
+            // anything there since we selected the first such candidate via `partition_point`.)
+            let mut scan_right = 1;
+            while let Some(&(next_offset, next_size)) = v.get(idx + scan_right)
+                && new_end >= next_offset
+            {
+                // Increase our size to absorb the next element.
+                new_end = new_end.max(next_offset + next_size);
+                // Look at the next element.
+                scan_right += 1;
+            }
+            // Update the element we grew.
+            v[idx] = (new_start, new_end - new_start);
+            // Remove the elements we absorbed (if any).
+            if scan_right > 1 {
+                drop(v.drain((idx + 1)..(idx + scan_right)));
+            }
+        } else {
+            // Insert new element.
+            v.insert(idx, (offset, size));
+        }
+    }
+}
+
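
A short worked example of `add_range` on the representation above (editor's illustration; offsets and sizes written as plain byte counts):

// Trace of successive add_range calls, assuming an initially empty RangeSet:
//   add_range(0, 4)   ->  [(0, 4)]
//   add_range(8, 2)   ->  [(0, 4), (8, 2)]   // disjoint: inserted at its sorted position
//   add_range(4, 4)   ->  [(0, 10)]          // touches both neighbours, so all three merge
//   add_range(12, 0)  ->  [(0, 10)]          // empty ranges are ignored
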
 struct ValidityVisitor<'rt, 'tcx, M: Machine<'tcx>> {
     /// The `path` may be pushed to, but the part that is present when a function
     /// starts must not be changed!  `visit_fields` and `visit_array` rely on
@@ -217,7 +270,17 @@ struct ValidityVisitor<'rt, 'tcx, M: Machine<'tcx>> {
     ref_tracking: Option<&'rt mut RefTracking<MPlaceTy<'tcx, M::Provenance>, Vec<PathElem>>>,
     /// `None` indicates this is not validating for CTFE (but for runtime).
     ctfe_mode: Option<CtfeValidationMode>,
-    ecx: &'rt InterpCx<'tcx, M>,
+    ecx: &'rt mut InterpCx<'tcx, M>,
+    /// Whether provenance should be reset outside of pointers (emulating the effect of a typed
+    /// copy).
+    reset_provenance_and_padding: bool,
+    /// This tracks which byte ranges in this value contain data; the remaining bytes are padding.
+    /// The ideal representation here would be pointer-length pairs, but to keep things more compact
+    /// we only store a (range) set of offsets -- the base pointer is the same throughout the entire
+    /// visit, after all.
+    /// If this is `Some`, then `reset_provenance_and_padding` must be true (but not vice versa:
+    /// we might not track data vs padding bytes if the operand isn't stored in memory anyway).
+    data_bytes: Option<RangeSet>,
 }
 
 impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
@@ -287,8 +350,14 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
             // arrays/slices
             ty::Array(..) | ty::Slice(..) => PathElem::ArrayElem(field),
 
+            // dyn* vtables
+            ty::Dynamic(_, _, ty::DynKind::DynStar) if field == 1 => PathElem::Vtable,
+
             // dyn traits
-            ty::Dynamic(..) => PathElem::DynDowncast,
+            ty::Dynamic(..) => {
+                assert_eq!(field, 0);
+                PathElem::DynDowncast
+            }
 
             // nothing else has an aggregate layout
             _ => bug!("aggregate_field_path_elem: got non-aggregate type {:?}", layout.ty),
@@ -314,11 +383,11 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
 
     fn read_immediate(
         &self,
-        op: &OpTy<'tcx, M::Provenance>,
+        val: &PlaceTy<'tcx, M::Provenance>,
         expected: ExpectedKind,
     ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
         Ok(try_validation!(
-            self.ecx.read_immediate(op),
+            self.ecx.read_immediate(val),
             self.path,
             Ub(InvalidUninitBytes(None)) =>
                 Uninit { expected },
@@ -332,10 +401,40 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
 
     fn read_scalar(
         &self,
-        op: &OpTy<'tcx, M::Provenance>,
+        val: &PlaceTy<'tcx, M::Provenance>,
         expected: ExpectedKind,
     ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
-        Ok(self.read_immediate(op, expected)?.to_scalar())
+        Ok(self.read_immediate(val, expected)?.to_scalar())
+    }
+
+    fn deref_pointer(
+        &mut self,
+        val: &PlaceTy<'tcx, M::Provenance>,
+        expected: ExpectedKind,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
+        // Not using `ecx.deref_pointer` since we want to use our `read_immediate` wrapper.
+        let imm = self.read_immediate(val, expected)?;
+        // Reset provenance: ensure slice tail metadata does not preserve provenance,
+        // and ensure all pointers do not preserve partial provenance.
+        if self.reset_provenance_and_padding {
+            if matches!(imm.layout.abi, Abi::Scalar(..)) {
+                // A thin pointer. If it has provenance, we don't have to do anything.
+                // If it does not, ensure we clear the provenance in memory.
+                if matches!(imm.to_scalar(), Scalar::Int(..)) {
+                    self.ecx.clear_provenance(val)?;
+                }
+            } else {
+                // A wide pointer. This means we have to worry both about the pointer itself and the
+                // metadata. We do the lazy thing and just write back the value we got. Just
+                // clearing provenance in a targeted manner would be more efficient, but unless this
+                // is a perf hotspot it's just not worth the effort.
+                self.ecx.write_immediate_no_validate(*imm, val)?;
+            }
+            // The entire thing is data, not padding.
+            self.add_data_range_place(val);
+        }
+        // Now turn it into a place.
+        self.ecx.ref_to_mplace(&imm)
     }
 
     fn check_wide_ptr_meta(
@@ -376,11 +475,10 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
     /// Check a reference or `Box`.
     fn check_safe_pointer(
         &mut self,
-        value: &OpTy<'tcx, M::Provenance>,
+        value: &PlaceTy<'tcx, M::Provenance>,
         ptr_kind: PointerKind,
     ) -> InterpResult<'tcx> {
-        // Not using `deref_pointer` since we want to use our `read_immediate` wrapper.
-        let place = self.ecx.ref_to_mplace(&self.read_immediate(value, ptr_kind.into())?)?;
+        let place = self.deref_pointer(value, ptr_kind.into())?;
         // Handle wide pointers.
         // Check metadata early, for better diagnostics
         if place.layout.is_unsized() {
@@ -564,31 +662,39 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
     /// Note that not all of these have `FieldsShape::Primitive`, e.g. wide references.
     fn try_visit_primitive(
         &mut self,
-        value: &OpTy<'tcx, M::Provenance>,
+        value: &PlaceTy<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx, bool> {
         // Go over all the primitive types
         let ty = value.layout.ty;
         match ty.kind() {
             ty::Bool => {
-                let value = self.read_scalar(value, ExpectedKind::Bool)?;
+                let scalar = self.read_scalar(value, ExpectedKind::Bool)?;
                 try_validation!(
-                    value.to_bool(),
+                    scalar.to_bool(),
                     self.path,
                     Ub(InvalidBool(..)) => ValidationErrorKind::InvalidBool {
-                        value: format!("{value:x}"),
+                        value: format!("{scalar:x}"),
                     }
                 );
+                if self.reset_provenance_and_padding {
+                    self.ecx.clear_provenance(value)?;
+                    self.add_data_range_place(value);
+                }
                 Ok(true)
             }
             ty::Char => {
-                let value = self.read_scalar(value, ExpectedKind::Char)?;
+                let scalar = self.read_scalar(value, ExpectedKind::Char)?;
                 try_validation!(
-                    value.to_char(),
+                    scalar.to_char(),
                     self.path,
                     Ub(InvalidChar(..)) => ValidationErrorKind::InvalidChar {
-                        value: format!("{value:x}"),
+                        value: format!("{scalar:x}"),
                     }
                 );
+                if self.reset_provenance_and_padding {
+                    self.ecx.clear_provenance(value)?;
+                    self.add_data_range_place(value);
+                }
                 Ok(true)
             }
             ty::Float(_) | ty::Int(_) | ty::Uint(_) => {
@@ -602,11 +708,14 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
                         ExpectedKind::Int
                     },
                 )?;
+                if self.reset_provenance_and_padding {
+                    self.ecx.clear_provenance(value)?;
+                    self.add_data_range_place(value);
+                }
                 Ok(true)
             }
             ty::RawPtr(..) => {
-                let place =
-                    self.ecx.ref_to_mplace(&self.read_immediate(value, ExpectedKind::RawPtr)?)?;
+                let place = self.deref_pointer(value, ExpectedKind::RawPtr)?;
                 if place.layout.is_unsized() {
                     self.check_wide_ptr_meta(place.meta(), place.layout)?;
                 }
@@ -617,11 +726,11 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
                 Ok(true)
             }
             ty::FnPtr(..) => {
-                let value = self.read_scalar(value, ExpectedKind::FnPtr)?;
+                let scalar = self.read_scalar(value, ExpectedKind::FnPtr)?;
 
                 // If we check references recursively, also check that this points to a function.
                 if let Some(_) = self.ref_tracking {
-                    let ptr = value.to_pointer(self.ecx)?;
+                    let ptr = scalar.to_pointer(self.ecx)?;
                     let _fn = try_validation!(
                         self.ecx.get_ptr_fn(ptr),
                         self.path,
@@ -631,10 +740,18 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
                     // FIXME: Check if the signature matches
                 } else {
                     // Otherwise (for standalone Miri), we have to still check it to be non-null.
-                    if self.ecx.scalar_may_be_null(value)? {
+                    if self.ecx.scalar_may_be_null(scalar)? {
                         throw_validation_failure!(self.path, NullFnPtr);
                     }
                 }
+                if self.reset_provenance_and_padding {
+                    // Make sure we do not preserve partial provenance. This matches the thin
+                    // pointer handling in `deref_pointer`.
+                    if matches!(scalar, Scalar::Int(..)) {
+                        self.ecx.clear_provenance(value)?;
+                    }
+                    self.add_data_range_place(value);
+                }
                 Ok(true)
             }
             ty::Never => throw_validation_failure!(self.path, NeverVal),
@@ -716,13 +833,178 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
         }
     }
 
-    fn in_mutable_memory(&self, op: &OpTy<'tcx, M::Provenance>) -> bool {
-        if let Some(mplace) = op.as_mplace_or_imm().left() {
+    fn in_mutable_memory(&self, val: &PlaceTy<'tcx, M::Provenance>) -> bool {
+        if let Some(mplace) = val.as_mplace_or_local().left() {
             if let Some(alloc_id) = mplace.ptr().provenance.and_then(|p| p.get_alloc_id()) {
-                return mutability(self.ecx, alloc_id).is_mut();
+                mutability(self.ecx, alloc_id).is_mut()
+            } else {
+                // No memory at all.
+                false
+            }
+        } else {
+            // A local variable -- definitely mutable.
+            true
+        }
+    }
+
+    /// Add the given pointer-length pair to the "data" range of this visit.
+    fn add_data_range(&mut self, ptr: Pointer<Option<M::Provenance>>, size: Size) {
+        if let Some(data_bytes) = self.data_bytes.as_mut() {
+            // We only have to store the offset, the rest is the same for all pointers here.
+            let (_prov, offset) = ptr.into_parts();
+            // Add this.
+            data_bytes.add_range(offset, size);
+        };
+    }
+
+    /// Add the entire given place to the "data" range of this visit.
+    fn add_data_range_place(&mut self, place: &PlaceTy<'tcx, M::Provenance>) {
+        // Only sized places can be added this way.
+        debug_assert!(place.layout.abi.is_sized());
+        if let Some(data_bytes) = self.data_bytes.as_mut() {
+            let offset = Self::data_range_offset(self.ecx, place);
+            data_bytes.add_range(offset, place.layout.size);
+        }
+    }
+
+    /// Convert a place into the offset it starts at, for the purpose of data_range tracking.
+    /// Must only be called if `data_bytes` is `Some(_)`.
+    fn data_range_offset(ecx: &InterpCx<'tcx, M>, place: &PlaceTy<'tcx, M::Provenance>) -> Size {
+        // The presence of `data_bytes` implies that our place is in memory.
+        let ptr = ecx
+            .place_to_op(place)
+            .expect("place must be in memory")
+            .as_mplace_or_imm()
+            .expect_left("place must be in memory")
+            .ptr();
+        let (_prov, offset) = ptr.into_parts();
+        offset
+    }
+
+    fn reset_padding(&mut self, place: &PlaceTy<'tcx, M::Provenance>) -> InterpResult<'tcx> {
+        let Some(data_bytes) = self.data_bytes.as_mut() else { return Ok(()) };
+        // Our value must be in memory, otherwise we would not have set up `data_bytes`.
+        let mplace = self.ecx.force_allocation(place)?;
+        // Determine starting offset and size.
+        let (_prov, start_offset) = mplace.ptr().into_parts();
+        let (size, _align) = self
+            .ecx
+            .size_and_align_of_mplace(&mplace)?
+            .unwrap_or((mplace.layout.size, mplace.layout.align.abi));
+        // If there is no padding at all, we can skip the rest: check for
+        // a single data range covering the entire value.
+        if data_bytes.0 == &[(start_offset, size)] {
+            return Ok(());
+        }
+        // Get a handle for the allocation. Do this only once, to avoid looking up the same
+        // allocation over and over again. (Though to be fair, iterating the value already does
+        // exactly that.)
+        let Some(mut alloc) = self.ecx.get_ptr_alloc_mut(mplace.ptr(), size)? else {
+            // A ZST, no padding to clear.
+            return Ok(());
+        };
+        // Add a "finalizer" data range at the end, so that the iteration below finds all gaps
+        // between ranges.
+        data_bytes.0.push((start_offset + size, Size::ZERO));
+        // Iterate, and reset gaps.
+        let mut padding_cleared_until = start_offset;
+        for &(offset, size) in data_bytes.0.iter() {
+            assert!(
+                offset >= padding_cleared_until,
+                "reset_padding on {}: previous field ended at offset {}, next field starts at {} (and has a size of {} bytes)",
+                mplace.layout.ty,
+                (padding_cleared_until - start_offset).bytes(),
+                (offset - start_offset).bytes(),
+                size.bytes(),
+            );
+            if offset > padding_cleared_until {
+                // We found padding. Adjust the range to be relative to `alloc`, and make it uninit.
+                let padding_start = padding_cleared_until - start_offset;
+                let padding_size = offset - padding_cleared_until;
+                let range = alloc_range(padding_start, padding_size);
+                trace!("reset_padding on {}: resetting padding range {range:?}", mplace.layout.ty);
+                alloc.write_uninit(range)?;
+            }
+            padding_cleared_until = offset + size;
+        }
+        assert!(padding_cleared_until == start_offset + size);
+        Ok(())
+    }
+
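The gap-finding loop in `reset_padding` above boils down to the following standalone sketch. It assumes the data ranges are already sorted and disjoint, and it returns the padding ranges instead of writing uninit bytes; the function name and the plain `u64` offsets are illustrative, not the interpreter's types.

```rust
// Given sorted, disjoint data ranges and the value's start/size, compute the padding ranges.
fn padding_ranges(data: &[(u64, u64)], start: u64, size: u64) -> Vec<(u64, u64)> {
    let mut data: Vec<(u64, u64)> = data.to_vec();
    // Sentinel so the loop also finds the gap between the last data range and the end.
    data.push((start + size, 0));
    let mut padding = Vec::new();
    let mut cleared_until = start;
    for &(offset, size) in &data {
        assert!(offset >= cleared_until, "data ranges must be sorted and disjoint");
        if offset > cleared_until {
            // The bytes between the previous data range and this one are padding.
            padding.push((cleared_until, offset - cleared_until));
        }
        cleared_until = offset + size;
    }
    padding
}

fn main() {
    // A 12-byte value whose data bytes are [0, 1), [4, 8) and [8, 10).
    let data = [(0, 1), (4, 4), (8, 2)];
    assert_eq!(padding_ranges(&data, 0, 12), vec![(1, 3), (10, 2)]);
}
```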
+    /// Computes the data range of this union type:
+    /// which bytes are inside a field (i.e., not padding).
+    fn union_data_range<'e>(
+        ecx: &'e mut InterpCx<'tcx, M>,
+        layout: TyAndLayout<'tcx>,
+    ) -> Cow<'e, RangeSet> {
+        assert!(layout.ty.is_union());
+        assert!(layout.abi.is_sized(), "there are no unsized unions");
+        let layout_cx = LayoutCx { tcx: *ecx.tcx, param_env: ecx.param_env };
+        return M::cached_union_data_range(ecx, layout.ty, || {
+            let mut out = RangeSet(Vec::new());
+            union_data_range_uncached(&layout_cx, layout, Size::ZERO, &mut out);
+            out
+        });
+
+        /// Helper for recursive traversal: add data ranges of the given type to `out`.
+        fn union_data_range_uncached<'tcx>(
+            cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
+            layout: TyAndLayout<'tcx>,
+            base_offset: Size,
+            out: &mut RangeSet,
+        ) {
+            // If this is a ZST, we don't contain any data. In particular, this helps us to quickly
+            // skip over huge arrays of ZST.
+            if layout.is_zst() {
+                return;
+            }
+            // Just recursively add all the fields of everything to the output.
+            match &layout.fields {
+                FieldsShape::Primitive => {
+                    out.add_range(base_offset, layout.size);
+                }
+                &FieldsShape::Union(fields) => {
+                    // Currently, all fields start at offset 0 (relative to `base_offset`).
+                    for field in 0..fields.get() {
+                        let field = layout.field(cx, field);
+                        union_data_range_uncached(cx, field, base_offset, out);
+                    }
+                }
+                &FieldsShape::Array { stride, count } => {
+                    let elem = layout.field(cx, 0);
+
+                    // Fast-path for large arrays of simple types that do not contain any padding.
+                    if elem.abi.is_scalar() {
+                        out.add_range(base_offset, elem.size * count);
+                    } else {
+                        for idx in 0..count {
+                            // This repeats the same computation for every array element... but the alternative
+                            // is to allocate temporary storage for a dedicated `out` set for the array element
+                            // and replicate that N times. Is that better?
+                            union_data_range_uncached(cx, elem, base_offset + idx * stride, out);
+                        }
+                    }
+                }
+                FieldsShape::Arbitrary { offsets, .. } => {
+                    for (field, &offset) in offsets.iter_enumerated() {
+                        let field = layout.field(cx, field.as_usize());
+                        union_data_range_uncached(cx, field, base_offset + offset, out);
+                    }
+                }
+            }
+            // Don't forget potential other variants.
+            match &layout.variants {
+                Variants::Single { .. } => {
+                    // Fully handled above.
+                }
+                Variants::Multiple { variants, .. } => {
+                    for variant in variants.indices() {
+                        let variant = layout.for_variant(cx, variant);
+                        union_data_range_uncached(cx, variant, base_offset, out);
+                    }
+                }
             }
         }
-        false
     }
 }
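To make `union_data_range` concrete, here is a small self-contained example of a union whose data bytes do not cover the whole value. The types are made up for illustration; the offsets follow from `#[repr(C)]` layout.

```rust
use std::mem::{size_of, ManuallyDrop};

// `B` is 4 bytes: `x` at offset 0, one padding byte, `y` at offset 2.
#[allow(dead_code)]
#[repr(C)]
struct B {
    x: u8,
    y: u16,
}

// For a union, the data bytes are the union of the data bytes of all fields,
// which all start at offset 0; everything else is padding.
#[allow(dead_code)]
#[repr(C)]
union U {
    a: u8,              // data: [0, 1)
    b: ManuallyDrop<B>, // data: [0, 1) and [2, 4)
}

fn main() {
    assert_eq!(size_of::<U>(), 4);
    // Data ranges of `U`: [0, 1) and [2, 4). The byte at offset 1 is padding in
    // every field, so it counts as padding of the union as well.
}
```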
 
@@ -774,7 +1056,7 @@ fn mutability<'tcx>(ecx: &InterpCx<'tcx, impl Machine<'tcx>>, alloc_id: AllocId)
 }
 
 impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt, 'tcx, M> {
-    type V = OpTy<'tcx, M::Provenance>;
+    type V = PlaceTy<'tcx, M::Provenance>;
 
     #[inline(always)]
     fn ecx(&self) -> &InterpCx<'tcx, M> {
@@ -783,11 +1065,11 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
 
     fn read_discriminant(
         &mut self,
-        op: &OpTy<'tcx, M::Provenance>,
+        val: &PlaceTy<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx, VariantIdx> {
         self.with_elem(PathElem::EnumTag, move |this| {
             Ok(try_validation!(
-                this.ecx.read_discriminant(op),
+                this.ecx.read_discriminant(val),
                 this.path,
                 Ub(InvalidTag(val)) => InvalidEnumTag {
                     value: format!("{val:x}"),
@@ -802,44 +1084,54 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
     #[inline]
     fn visit_field(
         &mut self,
-        old_op: &OpTy<'tcx, M::Provenance>,
+        old_val: &PlaceTy<'tcx, M::Provenance>,
         field: usize,
-        new_op: &OpTy<'tcx, M::Provenance>,
+        new_val: &PlaceTy<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx> {
-        let elem = self.aggregate_field_path_elem(old_op.layout, field);
-        self.with_elem(elem, move |this| this.visit_value(new_op))
+        let elem = self.aggregate_field_path_elem(old_val.layout, field);
+        self.with_elem(elem, move |this| this.visit_value(new_val))
     }
 
     #[inline]
     fn visit_variant(
         &mut self,
-        old_op: &OpTy<'tcx, M::Provenance>,
+        old_val: &PlaceTy<'tcx, M::Provenance>,
         variant_id: VariantIdx,
-        new_op: &OpTy<'tcx, M::Provenance>,
+        new_val: &PlaceTy<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx> {
-        let name = match old_op.layout.ty.kind() {
+        let name = match old_val.layout.ty.kind() {
             ty::Adt(adt, _) => PathElem::Variant(adt.variant(variant_id).name),
             // Coroutines also have variants
             ty::Coroutine(..) => PathElem::CoroutineState(variant_id),
-            _ => bug!("Unexpected type with variant: {:?}", old_op.layout.ty),
+            _ => bug!("Unexpected type with variant: {:?}", old_val.layout.ty),
         };
-        self.with_elem(name, move |this| this.visit_value(new_op))
+        self.with_elem(name, move |this| this.visit_value(new_val))
     }
 
     #[inline(always)]
     fn visit_union(
         &mut self,
-        op: &OpTy<'tcx, M::Provenance>,
+        val: &PlaceTy<'tcx, M::Provenance>,
         _fields: NonZero<usize>,
     ) -> InterpResult<'tcx> {
         // Special check for CTFE validation, preventing `UnsafeCell` inside unions in immutable memory.
         if self.ctfe_mode.is_some_and(|c| !c.allow_immutable_unsafe_cell()) {
-            if !op.layout.is_zst() && !op.layout.ty.is_freeze(*self.ecx.tcx, self.ecx.param_env) {
-                if !self.in_mutable_memory(op) {
+            if !val.layout.is_zst() && !val.layout.ty.is_freeze(*self.ecx.tcx, self.ecx.param_env) {
+                if !self.in_mutable_memory(val) {
                     throw_validation_failure!(self.path, UnsafeCellInImmutable);
                 }
             }
         }
+        if self.reset_provenance_and_padding
+            && let Some(data_bytes) = self.data_bytes.as_mut()
+        {
+            let base_offset = Self::data_range_offset(self.ecx, val);
+            // Determine and add data range for this union.
+            let union_data_range = Self::union_data_range(self.ecx, val.layout);
+            for &(offset, size) in union_data_range.0.iter() {
+                data_bytes.add_range(base_offset + offset, size);
+            }
+        }
         Ok(())
     }
 
@@ -847,39 +1139,41 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
     fn visit_box(
         &mut self,
         _box_ty: Ty<'tcx>,
-        op: &OpTy<'tcx, M::Provenance>,
+        val: &PlaceTy<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx> {
-        self.check_safe_pointer(op, PointerKind::Box)?;
+        self.check_safe_pointer(val, PointerKind::Box)?;
         Ok(())
     }
 
     #[inline]
-    fn visit_value(&mut self, op: &OpTy<'tcx, M::Provenance>) -> InterpResult<'tcx> {
-        trace!("visit_value: {:?}, {:?}", *op, op.layout);
+    fn visit_value(&mut self, val: &PlaceTy<'tcx, M::Provenance>) -> InterpResult<'tcx> {
+        trace!("visit_value: {:?}, {:?}", *val, val.layout);
 
         // Check primitive types -- the leaves of our recursive descent.
+        // This is called even for enum discriminants (which are "fields" of their enum),
+        // so for integer-typed discriminants the provenance reset will happen here.
         // We assume that the Scalar validity range does not restrict these values
         // any further than `try_visit_primitive` does!
-        if self.try_visit_primitive(op)? {
+        if self.try_visit_primitive(val)? {
             return Ok(());
         }
 
         // Special check preventing `UnsafeCell` in the inner part of constants
         if self.ctfe_mode.is_some_and(|c| !c.allow_immutable_unsafe_cell()) {
-            if !op.layout.is_zst()
-                && let Some(def) = op.layout.ty.ty_adt_def()
+            if !val.layout.is_zst()
+                && let Some(def) = val.layout.ty.ty_adt_def()
                 && def.is_unsafe_cell()
             {
-                if !self.in_mutable_memory(op) {
+                if !self.in_mutable_memory(val) {
                     throw_validation_failure!(self.path, UnsafeCellInImmutable);
                 }
             }
         }
 
         // Recursively walk the value at its type. Apply optimizations for some large types.
-        match op.layout.ty.kind() {
+        match val.layout.ty.kind() {
             ty::Str => {
-                let mplace = op.assert_mem_place(); // strings are unsized and hence never immediate
+                let mplace = val.assert_mem_place(); // strings are unsized and hence never immediate
                 let len = mplace.len(self.ecx)?;
                 try_validation!(
                     self.ecx.read_bytes_ptr_strip_provenance(mplace.ptr(), Size::from_bytes(len)),
@@ -889,11 +1183,10 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
                 );
             }
             ty::Array(tys, ..) | ty::Slice(tys)
-                // This optimization applies for types that can hold arbitrary bytes (such as
-                // integer and floating point types) or for structs or tuples with no fields.
-                // FIXME(wesleywiser) This logic could be extended further to arbitrary structs
-                // or tuples made up of integer/floating point types or inhabited ZSTs with no
-                // padding.
+                // This optimization applies for types that can hold arbitrary non-provenance bytes (such as
+                // integer and floating point types).
+                // FIXME(wesleywiser) This logic could be extended further to arbitrary structs or
+                // tuples made up of integer/floating point types or inhabited ZSTs with no padding.
                 if matches!(tys.kind(), ty::Int(..) | ty::Uint(..) | ty::Float(..))
                 =>
             {
@@ -901,18 +1194,19 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
                 // Optimized handling for arrays of integer/float type.
 
                 // This is the length of the array/slice.
-                let len = op.len(self.ecx)?;
+                let len = val.len(self.ecx)?;
                 // This is the element type size.
                 let layout = self.ecx.layout_of(*tys)?;
                 // This is the size in bytes of the whole array. (This checks for overflow.)
                 let size = layout.size * len;
                 // If the size is 0, there is nothing to check.
-                // (`size` can only be 0 of `len` is 0, and empty arrays are always valid.)
+                // (`size` can only be 0 if `len` is 0, and empty arrays are always valid.)
                 if size == Size::ZERO {
                     return Ok(());
                 }
-                // Now that we definitely have a non-ZST array, we know it lives in memory.
-                let mplace = match op.as_mplace_or_imm() {
+                // Now that we definitely have a non-ZST array, we know it lives in memory -- except it may
+                // be an uninitialized local variable; those are also "immediate".
+                let mplace = match val.to_op(self.ecx)?.as_mplace_or_imm() {
                     Left(mplace) => mplace,
                     Right(imm) => match *imm {
                         Immediate::Uninit =>
@@ -958,20 +1252,30 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
                         }
                     }
                 }
+
+                // Don't forget that these are all non-pointer types, and thus do not preserve
+                // provenance.
+                if self.reset_provenance_and_padding {
+                    // We can't share this with the code above, since there we might be looking at read-only memory.
+                    let mut alloc = self.ecx.get_ptr_alloc_mut(mplace.ptr(), size)?.expect("we already excluded size 0");
+                    alloc.clear_provenance()?;
+                    // Also, mark this as containing data, not padding.
+                    self.add_data_range(mplace.ptr(), size);
+                }
             }
             // Fast path for arrays and slices of ZSTs. We only need to check a single ZST element
             // of an array and not all of them, because there's only a single value of a specific
             // ZST type, so either validation fails for all elements or none.
             ty::Array(tys, ..) | ty::Slice(tys) if self.ecx.layout_of(*tys)?.is_zst() => {
                 // Validate just the first element (if any).
-                if op.len(self.ecx)? > 0 {
-                    self.visit_field(op, 0, &self.ecx.project_index(op, 0)?)?;
+                if val.len(self.ecx)? > 0 {
+                    self.visit_field(val, 0, &self.ecx.project_index(val, 0)?)?;
                 }
             }
             _ => {
                 // default handler
                 try_validation!(
-                    self.walk_value(op),
+                    self.walk_value(val),
                     self.path,
                     // It's not great to catch errors here, since we can't give a very good path,
                     // but it's better than ICEing.
@@ -992,15 +1296,15 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
         // FIXME: We could avoid some redundant checks here. For newtypes wrapping
         // scalars, we do the same check on every "level" (e.g., first we check
         // MyNewtype and then the scalar in there).
-        match op.layout.abi {
+        match val.layout.abi {
             Abi::Uninhabited => {
-                let ty = op.layout.ty;
+                let ty = val.layout.ty;
                 throw_validation_failure!(self.path, UninhabitedVal { ty });
             }
             Abi::Scalar(scalar_layout) => {
                 if !scalar_layout.is_uninit_valid() {
                     // There is something to check here.
-                    let scalar = self.read_scalar(op, ExpectedKind::InitScalar)?;
+                    let scalar = self.read_scalar(val, ExpectedKind::InitScalar)?;
                     self.visit_scalar(scalar, scalar_layout)?;
                 }
             }
@@ -1010,7 +1314,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
                 // the other must be init.
                 if !a_layout.is_uninit_valid() && !b_layout.is_uninit_valid() {
                     let (a, b) =
-                        self.read_immediate(op, ExpectedKind::InitScalar)?.to_scalar_pair();
+                        self.read_immediate(val, ExpectedKind::InitScalar)?.to_scalar_pair();
                     self.visit_scalar(a, a_layout)?;
                     self.visit_scalar(b, b_layout)?;
                 }
@@ -1031,19 +1335,34 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
 
 impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
     fn validate_operand_internal(
-        &self,
-        op: &OpTy<'tcx, M::Provenance>,
+        &mut self,
+        val: &PlaceTy<'tcx, M::Provenance>,
         path: Vec<PathElem>,
         ref_tracking: Option<&mut RefTracking<MPlaceTy<'tcx, M::Provenance>, Vec<PathElem>>>,
         ctfe_mode: Option<CtfeValidationMode>,
+        reset_provenance_and_padding: bool,
     ) -> InterpResult<'tcx> {
-        trace!("validate_operand_internal: {:?}, {:?}", *op, op.layout.ty);
-
-        // Construct a visitor
-        let mut visitor = ValidityVisitor { path, ref_tracking, ctfe_mode, ecx: self };
+        trace!("validate_operand_internal: {:?}, {:?}", *val, val.layout.ty);
 
-        // Run it.
-        match self.run_for_validation(|| visitor.visit_value(op)) {
+        // Run the visitor.
+        match self.run_for_validation(|ecx| {
+            let reset_padding = reset_provenance_and_padding && {
+                // Check if `val` is actually stored in memory. If not, padding is not even
+                // represented and we need not reset it.
+                ecx.place_to_op(val)?.as_mplace_or_imm().is_left()
+            };
+            let mut v = ValidityVisitor {
+                path,
+                ref_tracking,
+                ctfe_mode,
+                ecx,
+                reset_provenance_and_padding,
+                data_bytes: reset_padding.then_some(RangeSet(Vec::new())),
+            };
+            v.visit_value(val)?;
+            v.reset_padding(val)?;
+            InterpResult::Ok(())
+        }) {
             Ok(()) => Ok(()),
             // Pass through validation failures and "invalid program" issues.
             Err(err)
@@ -1079,13 +1398,19 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
     /// - no `UnsafeCell` or non-ZST `&mut`.
     #[inline(always)]
     pub(crate) fn const_validate_operand(
-        &self,
-        op: &OpTy<'tcx, M::Provenance>,
+        &mut self,
+        val: &PlaceTy<'tcx, M::Provenance>,
         path: Vec<PathElem>,
         ref_tracking: &mut RefTracking<MPlaceTy<'tcx, M::Provenance>, Vec<PathElem>>,
         ctfe_mode: CtfeValidationMode,
     ) -> InterpResult<'tcx> {
-        self.validate_operand_internal(op, path, Some(ref_tracking), Some(ctfe_mode))
+        self.validate_operand_internal(
+            val,
+            path,
+            Some(ref_tracking),
+            Some(ctfe_mode),
+            /*reset_provenance_and_padding*/ false,
+        )
     }
 
     /// This function checks the data at `op` to be runtime-valid.
@@ -1093,21 +1418,41 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
     /// It will error if the bits at the destination do not match the ones described by the layout.
     #[inline(always)]
     pub fn validate_operand(
-        &self,
-        op: &OpTy<'tcx, M::Provenance>,
+        &mut self,
+        val: &PlaceTy<'tcx, M::Provenance>,
         recursive: bool,
+        reset_provenance_and_padding: bool,
     ) -> InterpResult<'tcx> {
         // Note that we *could* actually be in CTFE here with `-Zextra-const-ub-checks`, but it's
         // still correct to not use `ctfe_mode`: that mode is for validation of the final constant
         // value, it rules out things like `UnsafeCell` in awkward places.
         if !recursive {
-            return self.validate_operand_internal(op, vec![], None, None);
+            return self.validate_operand_internal(
+                val,
+                vec![],
+                None,
+                None,
+                reset_provenance_and_padding,
+            );
         }
         // Do a recursive check.
         let mut ref_tracking = RefTracking::empty();
-        self.validate_operand_internal(op, vec![], Some(&mut ref_tracking), None)?;
+        self.validate_operand_internal(
+            val,
+            vec![],
+            Some(&mut ref_tracking),
+            None,
+            reset_provenance_and_padding,
+        )?;
         while let Some((mplace, path)) = ref_tracking.todo.pop() {
-            self.validate_operand_internal(&mplace.into(), path, Some(&mut ref_tracking), None)?;
+            // Things behind references do *not* have their provenance reset.
+            self.validate_operand_internal(
+                &mplace.into(),
+                path,
+                Some(&mut ref_tracking),
+                None,
+                /*reset_provenance_and_padding*/ false,
+            )?;
         }
         Ok(())
     }
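As a concrete picture of what the new `reset_provenance_and_padding` mode treats as padding, consider the following self-contained example. The struct is made up; its offsets follow from `#[repr(C)]` layout and match the data ranges used in the sketch after `reset_padding` above.

```rust
#[allow(dead_code)]
#[repr(C)]
struct Example {
    a: u8,  // data bytes [0, 1)
    b: u32, // data bytes [4, 8)
    c: u16, // data bytes [8, 10)
}

fn main() {
    assert_eq!(std::mem::offset_of!(Example, a), 0);
    assert_eq!(std::mem::offset_of!(Example, b), 4);
    assert_eq!(std::mem::offset_of!(Example, c), 8);
    assert_eq!(std::mem::size_of::<Example>(), 12);
    // Data ranges: [0, 1), [4, 8), [8, 10); padding: [1, 4) and [10, 12).
    // With `reset_provenance_and_padding`, validation writes uninit over exactly
    // those padding ranges, emulating the effect of a typed copy of `Example`.
}
```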
diff --git a/compiler/rustc_const_eval/src/interpret/visitor.rs b/compiler/rustc_const_eval/src/interpret/visitor.rs
index b02f12e3c7f0b..d8af67bd0e705 100644
--- a/compiler/rustc_const_eval/src/interpret/visitor.rs
+++ b/compiler/rustc_const_eval/src/interpret/visitor.rs
@@ -5,6 +5,7 @@ use std::num::NonZero;
 
 use rustc_index::IndexVec;
 use rustc_middle::mir::interpret::InterpResult;
+use rustc_middle::ty::layout::LayoutOf;
 use rustc_middle::ty::{self, Ty};
 use rustc_target::abi::{FieldIdx, FieldsShape, VariantIdx, Variants};
 use tracing::trace;
@@ -82,6 +83,7 @@ pub trait ValueVisitor<'tcx, M: Machine<'tcx>>: Sized {
         self.visit_value(new_val)
     }
 
+    /// Traversal logic; should not be overloaded.
     fn walk_value(&mut self, v: &Self::V) -> InterpResult<'tcx> {
         let ty = v.layout().ty;
         trace!("walk_value: type: {ty}");
@@ -104,6 +106,17 @@ pub trait ValueVisitor<'tcx, M: Machine<'tcx>>: Sized {
                 // DynStar types. Very different from a dyn type (but strangely part of the
                 // same variant in `TyKind`): These are pairs where the 2nd component is the
                 // vtable, and the first component is the data (which must be ptr-sized).
+
+                // First make sure the vtable can be read at its type.
+                // The type of this vtable is fake: it claims to be a reference to some actual memory, but that isn't true.
+                // So we transmute it to a raw pointer.
+                let raw_ptr_ty = Ty::new_mut_ptr(*self.ecx().tcx, self.ecx().tcx.types.unit);
+                let raw_ptr_ty = self.ecx().layout_of(raw_ptr_ty)?;
+                let vtable_field =
+                    self.ecx().project_field(v, 1)?.transmute(raw_ptr_ty, self.ecx())?;
+                self.visit_field(v, 1, &vtable_field)?;
+
+                // Then unpack the first field, and continue.
                 let data = self.ecx().unpack_dyn_star(v, data)?;
                 return self.visit_field(v, 0, &data);
             }
diff --git a/compiler/rustc_const_eval/src/util/check_validity_requirement.rs b/compiler/rustc_const_eval/src/util/check_validity_requirement.rs
index cbd1fdeea2aa3..611a8e1a88497 100644
--- a/compiler/rustc_const_eval/src/util/check_validity_requirement.rs
+++ b/compiler/rustc_const_eval/src/util/check_validity_requirement.rs
@@ -4,7 +4,7 @@ use rustc_middle::ty::{ParamEnvAnd, Ty, TyCtxt};
 use rustc_target::abi::{Abi, FieldsShape, Scalar, Variants};
 
 use crate::const_eval::{CanAccessMutGlobal, CheckAlignment, CompileTimeMachine};
-use crate::interpret::{InterpCx, MemoryKind, OpTy};
+use crate::interpret::{InterpCx, MemoryKind};
 
 /// Determines if this type permits "raw" initialization by just transmuting some memory into an
 /// instance of `T`.
@@ -32,15 +32,15 @@ pub fn check_validity_requirement<'tcx>(
 
     let layout_cx = LayoutCx { tcx, param_env: param_env_and_ty.param_env };
     if kind == ValidityRequirement::Uninit || tcx.sess.opts.unstable_opts.strict_init_checks {
-        might_permit_raw_init_strict(layout, &layout_cx, kind)
+        check_validity_requirement_strict(layout, &layout_cx, kind)
     } else {
-        might_permit_raw_init_lax(layout, &layout_cx, kind)
+        check_validity_requirement_lax(layout, &layout_cx, kind)
     }
 }
 
-/// Implements the 'strict' version of the `might_permit_raw_init` checks; see that function for
-/// details.
-fn might_permit_raw_init_strict<'tcx>(
+/// Implements the 'strict' version of the [`check_validity_requirement`] checks; see that function
+/// for details.
+fn check_validity_requirement_strict<'tcx>(
     ty: TyAndLayout<'tcx>,
     cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
     kind: ValidityRequirement,
@@ -61,18 +61,24 @@ fn might_permit_raw_init_strict<'tcx>(
         .expect("failed to write bytes for zero valid check");
     }
 
-    let ot: OpTy<'_, _> = allocated.into();
-
     // Assume that if it failed, it's a validation failure.
     // This does *not* actually check that references are dereferenceable, but since all types that
     // require dereferenceability also require non-null, we don't actually get any false negatives
     // due to this.
-    Ok(cx.validate_operand(&ot, /*recursive*/ false).is_ok())
+    // The value we are validating is temporary and discarded at the end of this function, so
+    // there is no point in resetting provenance and padding.
+    Ok(cx
+        .validate_operand(
+            &allocated.into(),
+            /*recursive*/ false,
+            /*reset_provenance_and_padding*/ false,
+        )
+        .is_ok())
 }
 
-/// Implements the 'lax' (default) version of the `might_permit_raw_init` checks; see that function for
-/// details.
-fn might_permit_raw_init_lax<'tcx>(
+/// Implements the 'lax' (default) version of the [`check_validity_requirement`] checks; see that
+/// function for details.
+fn check_validity_requirement_lax<'tcx>(
     this: TyAndLayout<'tcx>,
     cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
     init_kind: ValidityRequirement,
@@ -137,7 +143,7 @@ fn might_permit_raw_init_lax<'tcx>(
         }
         FieldsShape::Arbitrary { offsets, .. } => {
             for idx in 0..offsets.len() {
-                if !might_permit_raw_init_lax(this.field(cx, idx), cx, init_kind)? {
+                if !check_validity_requirement_lax(this.field(cx, idx), cx, init_kind)? {
                     // We found a field that is unhappy with this kind of initialization.
                     return Ok(false);
                 }
diff --git a/compiler/rustc_hir/src/hir.rs b/compiler/rustc_hir/src/hir.rs
index 57c47d29857c3..b01c24cf3053e 100644
--- a/compiler/rustc_hir/src/hir.rs
+++ b/compiler/rustc_hir/src/hir.rs
@@ -168,6 +168,19 @@ impl Lifetime {
             (LifetimeSuggestionPosition::Normal, self.ident.span)
         }
     }
+
+    pub fn suggestion(&self, new_lifetime: &str) -> (Span, String) {
+        debug_assert!(new_lifetime.starts_with('\''));
+        let (pos, span) = self.suggestion_position();
+        let code = match pos {
+            LifetimeSuggestionPosition::Normal => format!("{new_lifetime}"),
+            LifetimeSuggestionPosition::Ampersand => format!("{new_lifetime} "),
+            LifetimeSuggestionPosition::ElidedPath => format!("<{new_lifetime}>"),
+            LifetimeSuggestionPosition::ElidedPathArgument => format!("{new_lifetime}, "),
+            LifetimeSuggestionPosition::ObjectDefault => format!("+ {new_lifetime}"),
+        };
+        (span, code)
+    }
 }
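The new `Lifetime::suggestion` helper centralizes which snippet each suggestion position produces. Here is a standalone sketch of that mapping with a local stand-in enum; `Position` and `snippet` are illustrative names, not the rustc types, and the per-variant examples in the comments are informal readings of the positions.

```rust
// Local stand-in for the suggestion positions, to show the snippet produced for "'a".
enum Position {
    Normal,             // e.g. replacing an explicit `'_`
    Ampersand,          // e.g. inserting right after `&` in `&u32`
    ElidedPath,         // e.g. turning `Foo` into `Foo<'a>`
    ElidedPathArgument, // e.g. turning `Foo<T>` into `Foo<'a, T>`
    ObjectDefault,      // e.g. turning `dyn Trait` into `dyn Trait + 'a`
}

fn snippet(pos: Position, new_lifetime: &str) -> String {
    match pos {
        Position::Normal => new_lifetime.to_string(),
        Position::Ampersand => format!("{new_lifetime} "),
        Position::ElidedPath => format!("<{new_lifetime}>"),
        Position::ElidedPathArgument => format!("{new_lifetime}, "),
        Position::ObjectDefault => format!("+ {new_lifetime}"),
    }
}

fn main() {
    assert_eq!(snippet(Position::Normal, "'a"), "'a");
    assert_eq!(snippet(Position::Ampersand, "'a"), "'a ");
    assert_eq!(snippet(Position::ElidedPath, "'a"), "<'a>");
    assert_eq!(snippet(Position::ElidedPathArgument, "'a"), "'a, ");
    assert_eq!(snippet(Position::ObjectDefault, "'a"), "+ 'a");
}
```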
 
 /// A `Path` is essentially Rust's notion of a name; for instance,
diff --git a/compiler/rustc_hir_analysis/src/collect/resolve_bound_vars.rs b/compiler/rustc_hir_analysis/src/collect/resolve_bound_vars.rs
index fe46eb583f1df..b4cbd1f309c97 100644
--- a/compiler/rustc_hir_analysis/src/collect/resolve_bound_vars.rs
+++ b/compiler/rustc_hir_analysis/src/collect/resolve_bound_vars.rs
@@ -1191,23 +1191,7 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
                                     (generics.span, "<'a>".to_owned())
                                 };
 
-                            let lifetime_sugg = match lifetime_ref.suggestion_position() {
-                                (hir::LifetimeSuggestionPosition::Normal, span) => {
-                                    (span, "'a".to_owned())
-                                }
-                                (hir::LifetimeSuggestionPosition::Ampersand, span) => {
-                                    (span, "'a ".to_owned())
-                                }
-                                (hir::LifetimeSuggestionPosition::ElidedPath, span) => {
-                                    (span, "<'a>".to_owned())
-                                }
-                                (hir::LifetimeSuggestionPosition::ElidedPathArgument, span) => {
-                                    (span, "'a, ".to_owned())
-                                }
-                                (hir::LifetimeSuggestionPosition::ObjectDefault, span) => {
-                                    (span, "+ 'a".to_owned())
-                                }
-                            };
+                            let lifetime_sugg = lifetime_ref.suggestion("'a");
                             let suggestions = vec![lifetime_sugg, new_param_sugg];
 
                             diag.span_label(
diff --git a/compiler/rustc_index/src/interval.rs b/compiler/rustc_index/src/interval.rs
index 503470f896d09..34f541a8cc639 100644
--- a/compiler/rustc_index/src/interval.rs
+++ b/compiler/rustc_index/src/interval.rs
@@ -17,7 +17,7 @@ mod tests;
 /// first value of the following element.
 #[derive(Debug, Clone)]
 pub struct IntervalSet<I> {
-    // Start, end
+    // Start, end (both inclusive)
     map: SmallVec<[(u32, u32); 2]>,
     domain: usize,
     _data: PhantomData<I>,
diff --git a/compiler/rustc_lint/src/builtin.rs b/compiler/rustc_lint/src/builtin.rs
index 824a1868c55df..7de92a43a9ab4 100644
--- a/compiler/rustc_lint/src/builtin.rs
+++ b/compiler/rustc_lint/src/builtin.rs
@@ -426,12 +426,6 @@ impl MissingDoc {
         article: &'static str,
         desc: &'static str,
     ) {
-        // If we're building a test harness, then warning about
-        // documentation is probably not really relevant right now.
-        if cx.sess().opts.test {
-            return;
-        }
-
         // Only check publicly-visible items, using the result from the privacy pass.
         // It's an option so the crate root can also use this function (it doesn't
         // have a `NodeId`).
diff --git a/compiler/rustc_middle/src/mir/interpret/allocation.rs b/compiler/rustc_middle/src/mir/interpret/allocation.rs
index 5fb8af576ae93..cd56d0edc0585 100644
--- a/compiler/rustc_middle/src/mir/interpret/allocation.rs
+++ b/compiler/rustc_middle/src/mir/interpret/allocation.rs
@@ -644,6 +644,12 @@ impl<Prov: Provenance, Extra, Bytes: AllocBytes> Allocation<Prov, Extra, Bytes>
         return Ok(());
     }
 
+    /// Remove all provenance in the given memory range.
+    pub fn clear_provenance(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult {
+        self.provenance.clear(range, cx)?;
+        return Ok(());
+    }
+
     /// Applies a previously prepared provenance copy.
     /// The affected range, as defined in the parameters to `provenance().prepare_copy` is expected
     /// to be clear of provenance.
diff --git a/compiler/rustc_middle/src/mir/interpret/value.rs b/compiler/rustc_middle/src/mir/interpret/value.rs
index 84c17b39a623e..989f03d3d1399 100644
--- a/compiler/rustc_middle/src/mir/interpret/value.rs
+++ b/compiler/rustc_middle/src/mir/interpret/value.rs
@@ -307,6 +307,13 @@ impl<'tcx, Prov: Provenance> Scalar<Prov> {
         }
     }
 
+    pub fn clear_provenance(&mut self) -> InterpResult<'tcx> {
+        if matches!(self, Scalar::Ptr(..)) {
+            *self = self.to_scalar_int()?.into();
+        }
+        Ok(())
+    }
+
     #[inline(always)]
     pub fn to_scalar_int(self) -> InterpResult<'tcx, ScalarInt> {
         self.try_to_scalar_int().map_err(|_| err_unsup!(ReadPointerAsInt(None)).into())
diff --git a/compiler/rustc_middle/src/ty/context.rs b/compiler/rustc_middle/src/ty/context.rs
index 5334e7677664d..56fcfe8e798b1 100644
--- a/compiler/rustc_middle/src/ty/context.rs
+++ b/compiler/rustc_middle/src/ty/context.rs
@@ -181,6 +181,10 @@ impl<'tcx> Interner for TyCtxt<'tcx> {
         }
     }
 
+    fn evaluation_is_concurrent(&self) -> bool {
+        self.sess.threads() > 1
+    }
+
     fn expand_abstract_consts<T: TypeFoldable<TyCtxt<'tcx>>>(self, t: T) -> T {
         self.expand_abstract_consts(t)
     }
diff --git a/compiler/rustc_middle/src/ty/sty.rs b/compiler/rustc_middle/src/ty/sty.rs
index 1f4f2c62d7084..730ba265b19d6 100644
--- a/compiler/rustc_middle/src/ty/sty.rs
+++ b/compiler/rustc_middle/src/ty/sty.rs
@@ -1136,6 +1136,7 @@ impl<'tcx> Ty<'tcx> {
     }
 
     /// Tests if this is any kind of primitive pointer type (reference, raw pointer, fn pointer).
+    /// `Box` is *not* considered a pointer here!
     #[inline]
     pub fn is_any_ptr(self) -> bool {
         self.is_ref() || self.is_unsafe_ptr() || self.is_fn_ptr()
diff --git a/compiler/rustc_span/src/symbol.rs b/compiler/rustc_span/src/symbol.rs
index 7572d57309c88..d5240a05310b6 100644
--- a/compiler/rustc_span/src/symbol.rs
+++ b/compiler/rustc_span/src/symbol.rs
@@ -1236,6 +1236,7 @@ symbols! {
         mir_unwind_unreachable,
         mir_variant,
         miri,
+        missing_docs,
         mmx_reg,
         modifiers,
         module,
@@ -1255,6 +1256,7 @@ symbols! {
         mut_preserve_binding_mode_2024,
         mut_ref,
         naked,
+        naked_asm,
         naked_functions,
         name,
         names,
diff --git a/compiler/rustc_trait_selection/src/error_reporting/infer/region.rs b/compiler/rustc_trait_selection/src/error_reporting/infer/region.rs
index 0372105a02b70..38d06f53fa632 100644
--- a/compiler/rustc_trait_selection/src/error_reporting/infer/region.rs
+++ b/compiler/rustc_trait_selection/src/error_reporting/infer/region.rs
@@ -852,18 +852,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> {
         impl<'hir, 'tcx> hir::intravisit::Visitor<'hir> for LifetimeReplaceVisitor<'tcx, '_> {
             fn visit_lifetime(&mut self, lt: &'hir hir::Lifetime) {
                 if lt.res == self.needle {
-                    let (pos, span) = lt.suggestion_position();
-                    let new_lt = &self.new_lt;
-                    let sugg = match pos {
-                        hir::LifetimeSuggestionPosition::Normal => format!("{new_lt}"),
-                        hir::LifetimeSuggestionPosition::Ampersand => format!("{new_lt} "),
-                        hir::LifetimeSuggestionPosition::ElidedPath => format!("<{new_lt}>"),
-                        hir::LifetimeSuggestionPosition::ElidedPathArgument => {
-                            format!("{new_lt}, ")
-                        }
-                        hir::LifetimeSuggestionPosition::ObjectDefault => format!("+ {new_lt}"),
-                    };
-                    self.add_lt_suggs.push((span, sugg));
+                    self.add_lt_suggs.push(lt.suggestion(self.new_lt));
                 }
             }
 
diff --git a/compiler/rustc_type_ir/src/elaborate.rs b/compiler/rustc_type_ir/src/elaborate.rs
index 433c444e701cc..f30419c801f18 100644
--- a/compiler/rustc_type_ir/src/elaborate.rs
+++ b/compiler/rustc_type_ir/src/elaborate.rs
@@ -237,7 +237,7 @@ pub fn supertrait_def_ids<I: Interner>(
     cx: I,
     trait_def_id: I::DefId,
 ) -> impl Iterator<Item = I::DefId> {
-    let mut set: HashSet<I::DefId> = HashSet::default();
+    let mut set = HashSet::default();
     let mut stack = vec![trait_def_id];
 
     set.insert(trait_def_id);
diff --git a/compiler/rustc_type_ir/src/interner.rs b/compiler/rustc_type_ir/src/interner.rs
index f2492ede4f5ea..8dec2133a45dd 100644
--- a/compiler/rustc_type_ir/src/interner.rs
+++ b/compiler/rustc_type_ir/src/interner.rs
@@ -137,6 +137,8 @@ pub trait Interner:
         f: impl FnOnce(&mut search_graph::GlobalCache<Self>) -> R,
     ) -> R;
 
+    fn evaluation_is_concurrent(&self) -> bool;
+
     fn expand_abstract_consts<T: TypeFoldable<Self>>(self, t: T) -> T;
 
     type GenericsOf: GenericsOf<Self>;
@@ -404,4 +406,7 @@ impl<I: Interner> search_graph::Cx for I {
     ) -> R {
         I::with_global_cache(self, mode, f)
     }
+    fn evaluation_is_concurrent(&self) -> bool {
+        self.evaluation_is_concurrent()
+    }
 }
diff --git a/compiler/rustc_type_ir/src/search_graph/global_cache.rs b/compiler/rustc_type_ir/src/search_graph/global_cache.rs
index 47f7cefac6ad1..0ce927b58bb38 100644
--- a/compiler/rustc_type_ir/src/search_graph/global_cache.rs
+++ b/compiler/rustc_type_ir/src/search_graph/global_cache.rs
@@ -44,22 +44,28 @@ impl<X: Cx> GlobalCache<X> {
         cx: X,
         input: X::Input,
 
-        result: X::Result,
+        origin_result: X::Result,
         dep_node: X::DepNodeIndex,
 
         additional_depth: usize,
         encountered_overflow: bool,
         nested_goals: NestedGoals<X>,
     ) {
-        let result = cx.mk_tracked(result, dep_node);
+        let result = cx.mk_tracked(origin_result, dep_node);
         let entry = self.map.entry(input).or_default();
         if encountered_overflow {
             let with_overflow = WithOverflow { nested_goals, result };
             let prev = entry.with_overflow.insert(additional_depth, with_overflow);
-            assert!(prev.is_none());
+            if let Some(prev) = &prev {
+                assert!(cx.evaluation_is_concurrent());
+                assert_eq!(cx.get_tracked(&prev.result), origin_result);
+            }
         } else {
             let prev = entry.success.replace(Success { additional_depth, nested_goals, result });
-            assert!(prev.is_none());
+            if let Some(prev) = &prev {
+                assert!(cx.evaluation_is_concurrent());
+                assert_eq!(cx.get_tracked(&prev.result), origin_result);
+            }
         }
     }
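The relaxed assertions above accept that, with a parallel compiler, two threads may race to insert the same cache entry; in that case the entries must agree. A minimal standalone sketch of this "insert or verify equal" pattern, using a plain `Mutex<HashMap>` rather than the rustc cache types (all names here are illustrative):

```rust
use std::collections::HashMap;
use std::sync::Mutex;

struct Cache {
    map: Mutex<HashMap<u32, String>>,
    concurrent: bool,
}

impl Cache {
    // Insert `result` for `input`. A pre-existing entry is only allowed under
    // concurrent evaluation, and both results must agree.
    fn insert(&self, input: u32, result: String) {
        let mut map = self.map.lock().unwrap();
        if let Some(prev) = map.insert(input, result.clone()) {
            assert!(self.concurrent, "duplicate cache entry without concurrency");
            assert_eq!(prev, result);
        }
    }
}

fn main() {
    let cache = Cache { map: Mutex::new(HashMap::new()), concurrent: true };
    cache.insert(1, "yes".to_string());
    cache.insert(1, "yes".to_string()); // fine: the same result was inserted twice
}
```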
 
diff --git a/compiler/rustc_type_ir/src/search_graph/mod.rs b/compiler/rustc_type_ir/src/search_graph/mod.rs
index 418139c3aadc0..ac4d0795a92e8 100644
--- a/compiler/rustc_type_ir/src/search_graph/mod.rs
+++ b/compiler/rustc_type_ir/src/search_graph/mod.rs
@@ -53,6 +53,8 @@ pub trait Cx: Copy {
         mode: SolverMode,
         f: impl FnOnce(&mut GlobalCache<Self>) -> R,
     ) -> R;
+
+    fn evaluation_is_concurrent(&self) -> bool;
 }
 
 pub trait Delegate {
diff --git a/config.example.toml b/config.example.toml
index 17fe9be7d567f..2b5e9ae117db7 100644
--- a/config.example.toml
+++ b/config.example.toml
@@ -42,6 +42,9 @@
 # Unless you're developing for a target where Rust CI doesn't build a compiler
 # toolchain or changing LLVM locally, you probably want to leave this enabled.
 #
+# Set this to `true` to download LLVM from CI if it is available; otherwise,
+# LLVM is built from `src/llvm-project`.
+#
 # Set this to `"if-unchanged"` to download only if the llvm-project has not
 # been modified. You can also use this if you are unsure whether you're on a
 # tier 1 target. All tier 1 targets are currently supported.
@@ -236,7 +239,7 @@
 # Instead of downloading the src/stage0 version of cargo-clippy specified,
 # use this cargo-clippy binary instead as the stage0 snapshot cargo-clippy.
 #
-# Note that this option should be used with the same toolchain as the `rustc` option above. 
+# Note that this option should be used with the same toolchain as the `rustc` option above.
 # Otherwise, clippy is likely to fail due to a toolchain conflict.
 #cargo-clippy = "/path/to/cargo-clippy"
 
diff --git a/library/alloc/src/slice.rs b/library/alloc/src/slice.rs
index 9d70487032699..8cdba166c9dff 100644
--- a/library/alloc/src/slice.rs
+++ b/library/alloc/src/slice.rs
@@ -96,6 +96,7 @@ pub(crate) mod hack {
     // We shouldn't add inline attribute to this since this is used in
     // `vec!` macro mostly and causes perf regression. See #71204 for
     // discussion and perf results.
+    #[allow(missing_docs)]
     pub fn into_vec<T, A: Allocator>(b: Box<[T], A>) -> Vec<T, A> {
         unsafe {
             let len = b.len();
@@ -105,6 +106,7 @@ pub(crate) mod hack {
     }
 
     #[cfg(not(no_global_oom_handling))]
+    #[allow(missing_docs)]
     #[inline]
     pub fn to_vec<T: ConvertVec, A: Allocator>(s: &[T], alloc: A) -> Vec<T, A> {
         T::to_vec(s, alloc)
diff --git a/library/alloc/src/string.rs b/library/alloc/src/string.rs
index bc8b7e24bf12b..05617669ed231 100644
--- a/library/alloc/src/string.rs
+++ b/library/alloc/src/string.rs
@@ -508,6 +508,7 @@ impl String {
     // NB see the slice::hack module in slice.rs for more information
     #[inline]
     #[cfg(test)]
+    #[allow(missing_docs)]
     pub fn from_str(_: &str) -> String {
         panic!("not available with cfg(test)");
     }
diff --git a/library/core/src/arch.rs b/library/core/src/arch.rs
index 31d6bc36fc8b9..be734d194be8f 100644
--- a/library/core/src/arch.rs
+++ b/library/core/src/arch.rs
@@ -17,6 +17,20 @@ pub macro asm("assembly template", $(operands,)* $(options($(option),*))?) {
     /* compiler built-in */
 }
 
+/// Inline assembly used in combination with `#[naked]` functions.
+///
+/// Refer to [Rust By Example] for a usage guide and the [reference] for
+/// detailed information about the syntax and available options.
+///
+/// [Rust By Example]: https://doc.rust-lang.org/nightly/rust-by-example/unsafe/asm.html
+/// [reference]: https://doc.rust-lang.org/nightly/reference/inline-assembly.html
+#[unstable(feature = "naked_functions", issue = "90957")]
+#[rustc_builtin_macro]
+#[cfg(not(bootstrap))]
+pub macro naked_asm("assembly template", $(operands,)* $(options($(option),*))?) {
+    /* compiler built-in */
+}
+
 /// Module-level inline assembly.
 ///
 /// Refer to [Rust By Example] for a usage guide and the [reference] for
diff --git a/library/core/src/ptr/const_ptr.rs b/library/core/src/ptr/const_ptr.rs
index febb3fed96302..3b45d46b31d5e 100644
--- a/library/core/src/ptr/const_ptr.rs
+++ b/library/core/src/ptr/const_ptr.rs
@@ -270,7 +270,7 @@ impl<T: ?Sized> *const T {
     /// }
     /// ```
     #[stable(feature = "ptr_as_ref", since = "1.9.0")]
-    #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
+    #[rustc_const_unstable(feature = "const_ptr_is_null", issue = "74939")]
     #[inline]
     pub const unsafe fn as_ref<'a>(self) -> Option<&'a T> {
         // SAFETY: the caller must guarantee that `self` is valid
@@ -302,7 +302,7 @@ impl<T: ?Sized> *const T {
     /// ```
     // FIXME: mention it in the docs for `as_ref` and `as_uninit_ref` once stabilized.
     #[unstable(feature = "ptr_as_ref_unchecked", issue = "122034")]
-    #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
+    #[rustc_const_unstable(feature = "ptr_as_ref_unchecked", issue = "122034")]
     #[inline]
     #[must_use]
     pub const unsafe fn as_ref_unchecked<'a>(self) -> &'a T {
@@ -336,7 +336,7 @@ impl<T: ?Sized> *const T {
     /// ```
     #[inline]
     #[unstable(feature = "ptr_as_uninit", issue = "75402")]
-    #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
+    #[rustc_const_unstable(feature = "ptr_as_uninit", issue = "75402")]
     pub const unsafe fn as_uninit_ref<'a>(self) -> Option<&'a MaybeUninit<T>>
     where
         T: Sized,
@@ -1664,7 +1664,7 @@ impl<T> *const [T] {
     /// [allocated object]: crate::ptr#allocated-object
     #[inline]
     #[unstable(feature = "ptr_as_uninit", issue = "75402")]
-    #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
+    #[rustc_const_unstable(feature = "ptr_as_uninit", issue = "75402")]
     pub const unsafe fn as_uninit_slice<'a>(self) -> Option<&'a [MaybeUninit<T>]> {
         if self.is_null() {
             None
diff --git a/library/core/src/ptr/mut_ptr.rs b/library/core/src/ptr/mut_ptr.rs
index bebc4b2f271eb..ddb9195d2e7c7 100644
--- a/library/core/src/ptr/mut_ptr.rs
+++ b/library/core/src/ptr/mut_ptr.rs
@@ -261,7 +261,7 @@ impl<T: ?Sized> *mut T {
     /// }
     /// ```
     #[stable(feature = "ptr_as_ref", since = "1.9.0")]
-    #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
+    #[rustc_const_unstable(feature = "const_ptr_is_null", issue = "74939")]
     #[inline]
     pub const unsafe fn as_ref<'a>(self) -> Option<&'a T> {
         // SAFETY: the caller must guarantee that `self` is valid for a
@@ -295,7 +295,7 @@ impl<T: ?Sized> *mut T {
     /// ```
     // FIXME: mention it in the docs for `as_ref` and `as_uninit_ref` once stabilized.
     #[unstable(feature = "ptr_as_ref_unchecked", issue = "122034")]
-    #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
+    #[rustc_const_unstable(feature = "ptr_as_ref_unchecked", issue = "122034")]
     #[inline]
     #[must_use]
     pub const unsafe fn as_ref_unchecked<'a>(self) -> &'a T {
@@ -334,7 +334,7 @@ impl<T: ?Sized> *mut T {
     /// ```
     #[inline]
     #[unstable(feature = "ptr_as_uninit", issue = "75402")]
-    #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
+    #[rustc_const_unstable(feature = "ptr_as_uninit", issue = "75402")]
     pub const unsafe fn as_uninit_ref<'a>(self) -> Option<&'a MaybeUninit<T>>
     where
         T: Sized,
@@ -580,7 +580,7 @@ impl<T: ?Sized> *mut T {
     /// println!("{s:?}"); // It'll print: "[4, 2, 3]".
     /// ```
     #[stable(feature = "ptr_as_ref", since = "1.9.0")]
-    #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
+    #[rustc_const_unstable(feature = "const_ptr_is_null", issue = "74939")]
     #[inline]
     pub const unsafe fn as_mut<'a>(self) -> Option<&'a mut T> {
         // SAFETY: the caller must guarantee that `self` is be valid for
@@ -616,7 +616,7 @@ impl<T: ?Sized> *mut T {
     /// ```
     // FIXME: mention it in the docs for `as_mut` and `as_uninit_mut` once stabilized.
     #[unstable(feature = "ptr_as_ref_unchecked", issue = "122034")]
-    #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
+    #[rustc_const_unstable(feature = "ptr_as_ref_unchecked", issue = "122034")]
     #[inline]
     #[must_use]
     pub const unsafe fn as_mut_unchecked<'a>(self) -> &'a mut T {
@@ -639,7 +639,7 @@ impl<T: ?Sized> *mut T {
     /// the pointer is [convertible to a reference](crate::ptr#pointer-to-reference-conversion).
     #[inline]
     #[unstable(feature = "ptr_as_uninit", issue = "75402")]
-    #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
+    #[rustc_const_unstable(feature = "ptr_as_uninit", issue = "75402")]
     pub const unsafe fn as_uninit_mut<'a>(self) -> Option<&'a mut MaybeUninit<T>>
     where
         T: Sized,
@@ -2016,7 +2016,7 @@ impl<T> *mut [T] {
     /// [allocated object]: crate::ptr#allocated-object
     #[inline]
     #[unstable(feature = "ptr_as_uninit", issue = "75402")]
-    #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
+    #[rustc_const_unstable(feature = "ptr_as_uninit", issue = "75402")]
     pub const unsafe fn as_uninit_slice<'a>(self) -> Option<&'a [MaybeUninit<T>]> {
         if self.is_null() {
             None
@@ -2068,7 +2068,7 @@ impl<T> *mut [T] {
     /// [allocated object]: crate::ptr#allocated-object
     #[inline]
     #[unstable(feature = "ptr_as_uninit", issue = "75402")]
-    #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
+    #[rustc_const_unstable(feature = "ptr_as_uninit", issue = "75402")]
     pub const unsafe fn as_uninit_slice_mut<'a>(self) -> Option<&'a mut [MaybeUninit<T>]> {
         if self.is_null() {
             None
diff --git a/library/core/src/ptr/non_null.rs b/library/core/src/ptr/non_null.rs
index b1429fff74434..673acc2972fe4 100644
--- a/library/core/src/ptr/non_null.rs
+++ b/library/core/src/ptr/non_null.rs
@@ -133,7 +133,7 @@ impl<T: Sized> NonNull<T> {
     #[inline]
     #[must_use]
     #[unstable(feature = "ptr_as_uninit", issue = "75402")]
-    #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
+    #[rustc_const_unstable(feature = "ptr_as_uninit", issue = "75402")]
     pub const unsafe fn as_uninit_ref<'a>(self) -> &'a MaybeUninit<T> {
         // SAFETY: the caller must guarantee that `self` meets all the
         // requirements for a reference.
@@ -157,7 +157,7 @@ impl<T: Sized> NonNull<T> {
     #[inline]
     #[must_use]
     #[unstable(feature = "ptr_as_uninit", issue = "75402")]
-    #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
+    #[rustc_const_unstable(feature = "ptr_as_uninit", issue = "75402")]
     pub const unsafe fn as_uninit_mut<'a>(self) -> &'a mut MaybeUninit<T> {
         // SAFETY: the caller must guarantee that `self` meets all the
         // requirements for a reference.
@@ -1563,7 +1563,7 @@ impl<T> NonNull<[T]> {
     #[inline]
     #[must_use]
     #[unstable(feature = "ptr_as_uninit", issue = "75402")]
-    #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
+    #[rustc_const_unstable(feature = "ptr_as_uninit", issue = "75402")]
     pub const unsafe fn as_uninit_slice<'a>(self) -> &'a [MaybeUninit<T>] {
         // SAFETY: the caller must uphold the safety contract for `as_uninit_slice`.
         unsafe { slice::from_raw_parts(self.cast().as_ptr(), self.len()) }
@@ -1628,7 +1628,7 @@ impl<T> NonNull<[T]> {
     #[inline]
     #[must_use]
     #[unstable(feature = "ptr_as_uninit", issue = "75402")]
-    #[rustc_const_unstable(feature = "const_ptr_as_ref", issue = "91822")]
+    #[rustc_const_unstable(feature = "ptr_as_uninit", issue = "75402")]
     pub const unsafe fn as_uninit_slice_mut<'a>(self) -> &'a mut [MaybeUninit<T>] {
         // SAFETY: the caller must uphold the safety contract for `as_uninit_slice_mut`.
         unsafe { slice::from_raw_parts_mut(self.cast().as_ptr(), self.len()) }
diff --git a/library/core/tests/mem.rs b/library/core/tests/mem.rs
index b7eee10ec3f9c..f3b4387f6a898 100644
--- a/library/core/tests/mem.rs
+++ b/library/core/tests/mem.rs
@@ -773,15 +773,20 @@ fn offset_of_addr() {
 #[test]
 fn const_maybe_uninit_zeroed() {
     // Sanity check for `MaybeUninit::zeroed` in a realistic const situation (plugin array term)
+
+    // It is crucial that this type has no padding!
     #[repr(C)]
     struct Foo {
-        a: Option<&'static str>,
+        a: Option<&'static u8>,
         b: Bar,
         c: f32,
+        _pad: u32,
         d: *const u8,
     }
+
     #[repr(C)]
     struct Bar(usize);
+
     struct FooPtr(*const Foo);
     unsafe impl Sync for FooPtr {}
 
diff --git a/library/std/src/io/buffered/bufreader.rs b/library/std/src/io/buffered/bufreader.rs
index cf226bd28d005..035afbb8368b4 100644
--- a/library/std/src/io/buffered/bufreader.rs
+++ b/library/std/src/io/buffered/bufreader.rs
@@ -267,6 +267,7 @@ impl<R: ?Sized> BufReader<R> {
 // This is only used by a test which asserts that the initialization-tracking is correct.
 #[cfg(test)]
 impl<R: ?Sized> BufReader<R> {
+    #[allow(missing_docs)]
     pub fn initialized(&self) -> usize {
         self.buf.initialized()
     }
diff --git a/library/std/src/io/error.rs b/library/std/src/io/error.rs
index e8ae1d99fbf37..6ecd9469c1740 100644
--- a/library/std/src/io/error.rs
+++ b/library/std/src/io/error.rs
@@ -223,10 +223,10 @@ pub enum ErrorKind {
     #[stable(feature = "rust1", since = "1.0.0")]
     ConnectionReset,
     /// The remote host is not reachable.
-    #[unstable(feature = "io_error_more", issue = "86442")]
+    #[stable(feature = "io_error_a_bit_more", since = "CURRENT_RUSTC_VERSION")]
     HostUnreachable,
     /// The network containing the remote host is not reachable.
-    #[unstable(feature = "io_error_more", issue = "86442")]
+    #[stable(feature = "io_error_a_bit_more", since = "CURRENT_RUSTC_VERSION")]
     NetworkUnreachable,
     /// The connection was aborted (terminated) by the remote server.
     #[stable(feature = "rust1", since = "1.0.0")]
@@ -243,7 +243,7 @@ pub enum ErrorKind {
     #[stable(feature = "rust1", since = "1.0.0")]
     AddrNotAvailable,
     /// The system's networking is down.
-    #[unstable(feature = "io_error_more", issue = "86442")]
+    #[stable(feature = "io_error_a_bit_more", since = "CURRENT_RUSTC_VERSION")]
     NetworkDown,
     /// The operation failed because a pipe was closed.
     #[stable(feature = "rust1", since = "1.0.0")]
@@ -259,18 +259,18 @@ pub enum ErrorKind {
     ///
     /// For example, a filesystem path was specified where one of the intermediate directory
     /// components was, in fact, a plain file.
-    #[unstable(feature = "io_error_more", issue = "86442")]
+    #[stable(feature = "io_error_a_bit_more", since = "CURRENT_RUSTC_VERSION")]
     NotADirectory,
     /// The filesystem object is, unexpectedly, a directory.
     ///
     /// A directory was specified when a non-directory was expected.
-    #[unstable(feature = "io_error_more", issue = "86442")]
+    #[stable(feature = "io_error_a_bit_more", since = "CURRENT_RUSTC_VERSION")]
     IsADirectory,
     /// A non-empty directory was specified where an empty directory was expected.
-    #[unstable(feature = "io_error_more", issue = "86442")]
+    #[stable(feature = "io_error_a_bit_more", since = "CURRENT_RUSTC_VERSION")]
     DirectoryNotEmpty,
     /// The filesystem or storage medium is read-only, but a write operation was attempted.
-    #[unstable(feature = "io_error_more", issue = "86442")]
+    #[stable(feature = "io_error_a_bit_more", since = "CURRENT_RUSTC_VERSION")]
     ReadOnlyFilesystem,
     /// Loop in the filesystem or IO subsystem; often, too many levels of symbolic links.
     ///
@@ -285,7 +285,7 @@ pub enum ErrorKind {
     ///
     /// With some network filesystems, notably NFS, an open file (or directory) can be invalidated
     /// by problems with the network or server.
-    #[unstable(feature = "io_error_more", issue = "86442")]
+    #[stable(feature = "io_error_a_bit_more", since = "CURRENT_RUSTC_VERSION")]
     StaleNetworkFileHandle,
     /// A parameter was incorrect.
     #[stable(feature = "rust1", since = "1.0.0")]
@@ -319,13 +319,13 @@ pub enum ErrorKind {
     /// The underlying storage (typically, a filesystem) is full.
     ///
     /// This does not include out of quota errors.
-    #[unstable(feature = "io_error_more", issue = "86442")]
+    #[stable(feature = "io_error_a_bit_more", since = "CURRENT_RUSTC_VERSION")]
     StorageFull,
     /// Seek on unseekable file.
     ///
     /// Seeking was attempted on an open file handle which is not suitable for seeking - for
     /// example, on Unix, a named pipe opened with `File::open`.
-    #[unstable(feature = "io_error_more", issue = "86442")]
+    #[stable(feature = "io_error_a_bit_more", since = "CURRENT_RUSTC_VERSION")]
     NotSeekable,
     /// Filesystem quota was exceeded.
     #[unstable(feature = "io_error_more", issue = "86442")]
@@ -335,22 +335,22 @@ pub enum ErrorKind {
     /// This might arise from a hard limit of the underlying filesystem or file access API, or from
     /// an administratively imposed resource limitation.  Simple disk full, and out of quota, have
     /// their own errors.
-    #[unstable(feature = "io_error_more", issue = "86442")]
+    #[stable(feature = "io_error_a_bit_more", since = "CURRENT_RUSTC_VERSION")]
     FileTooLarge,
     /// Resource is busy.
-    #[unstable(feature = "io_error_more", issue = "86442")]
+    #[stable(feature = "io_error_a_bit_more", since = "CURRENT_RUSTC_VERSION")]
     ResourceBusy,
     /// Executable file is busy.
     ///
     /// An attempt was made to write to a file which is also in use as a running program.  (Not all
     /// operating systems detect this situation.)
-    #[unstable(feature = "io_error_more", issue = "86442")]
+    #[stable(feature = "io_error_a_bit_more", since = "CURRENT_RUSTC_VERSION")]
     ExecutableFileBusy,
     /// Deadlock (avoided).
     ///
     /// A file locking operation would result in deadlock.  This situation is typically detected, if
     /// at all, on a best-effort basis.
-    #[unstable(feature = "io_error_more", issue = "86442")]
+    #[stable(feature = "io_error_a_bit_more", since = "CURRENT_RUSTC_VERSION")]
     Deadlock,
     /// Cross-device or cross-filesystem (hard) link or rename.
     #[unstable(feature = "io_error_more", issue = "86442")]
@@ -358,7 +358,7 @@ pub enum ErrorKind {
     /// Too many (hard) links to the same filesystem object.
     ///
     /// The filesystem does not support making so many hardlinks to the same file.
-    #[unstable(feature = "io_error_more", issue = "86442")]
+    #[stable(feature = "io_error_a_bit_more", since = "CURRENT_RUSTC_VERSION")]
     TooManyLinks,
     /// A filename was invalid.
     ///
@@ -369,7 +369,7 @@ pub enum ErrorKind {
     ///
     /// When trying to run an external program, a system or process limit on the size of the
     /// arguments would have been exceeded.
-    #[unstable(feature = "io_error_more", issue = "86442")]
+    #[stable(feature = "io_error_a_bit_more", since = "CURRENT_RUSTC_VERSION")]
     ArgumentListTooLong,
     /// This operation was interrupted.
     ///
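Note (not part of this patch): once these `ErrorKind` variants are stable, downstream code can match on them without the `io_error_more` feature gate. A minimal sketch using a few of the variants stabilized here (the selection is arbitrary):

    use std::io::{Error, ErrorKind};

    fn describe(e: &Error) -> &'static str {
        match e.kind() {
            ErrorKind::HostUnreachable => "remote host is unreachable",
            ErrorKind::NotADirectory => "a path component is not a directory",
            ErrorKind::StorageFull => "the underlying storage is full",
            _ => "other I/O error",
        }
    }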
diff --git a/library/std/src/sys/pal/unix/stack_overflow.rs b/library/std/src/sys/pal/unix/stack_overflow.rs
index 9ff44b54c41a1..728ce8d60f639 100644
--- a/library/std/src/sys/pal/unix/stack_overflow.rs
+++ b/library/std/src/sys/pal/unix/stack_overflow.rs
@@ -32,7 +32,8 @@ impl Drop for Handler {
     target_os = "macos",
     target_os = "netbsd",
     target_os = "openbsd",
-    target_os = "solaris"
+    target_os = "solaris",
+    target_os = "illumos",
 ))]
 mod imp {
     #[cfg(not(all(target_os = "linux", target_env = "gnu")))]
@@ -280,7 +281,7 @@ mod imp {
         libc::SIGSTKSZ
     }
 
-    #[cfg(target_os = "solaris")]
+    #[cfg(any(target_os = "solaris", target_os = "illumos"))]
     unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
         let mut current_stack: libc::stack_t = crate::mem::zeroed();
         assert_eq!(libc::stack_getbounds(&mut current_stack), 0);
@@ -486,7 +487,12 @@ mod imp {
         Some(guardaddr..guardaddr + page_size)
     }
 
-    #[cfg(any(target_os = "macos", target_os = "openbsd", target_os = "solaris"))]
+    #[cfg(any(
+        target_os = "macos",
+        target_os = "openbsd",
+        target_os = "solaris",
+        target_os = "illumos",
+    ))]
     // FIXME: I am probably not unsafe.
     unsafe fn current_guard() -> Option<Range<usize>> {
         let stackptr = get_stack_start()?;
@@ -569,7 +575,8 @@ mod imp {
     target_os = "macos",
     target_os = "netbsd",
     target_os = "openbsd",
-    target_os = "solaris"
+    target_os = "solaris",
+    target_os = "illumos",
 )))]
 mod imp {
     pub unsafe fn init() {}
diff --git a/src/bootstrap/defaults/config.compiler.toml b/src/bootstrap/defaults/config.compiler.toml
index 789586b58f706..147939d2047e8 100644
--- a/src/bootstrap/defaults/config.compiler.toml
+++ b/src/bootstrap/defaults/config.compiler.toml
@@ -27,4 +27,5 @@ assertions = false
 # Enable warnings during the LLVM compilation (when LLVM is changed, causing a compilation)
 enable-warnings = true
 # Will download LLVM from CI if available on your platform.
-download-ci-llvm = "if-unchanged"
+# If you intend to modify `src/llvm-project`, use `"if-unchanged"` or `false` instead.
+download-ci-llvm = true
diff --git a/src/bootstrap/download-ci-llvm-stamp b/src/bootstrap/download-ci-llvm-stamp
index 909015305015b..42cecbf5df9bb 100644
--- a/src/bootstrap/download-ci-llvm-stamp
+++ b/src/bootstrap/download-ci-llvm-stamp
@@ -1,4 +1,4 @@
 Change this file to make users of the `download-ci-llvm` configuration download
 a new version of LLVM from CI, even if the LLVM submodule hasn’t changed.
 
-Last change is for: https://github.com/rust-lang/rust/pull/129116
+Last change is for: https://github.com/rust-lang/rust/pull/129788
diff --git a/src/bootstrap/src/core/build_steps/compile.rs b/src/bootstrap/src/core/build_steps/compile.rs
index 102c9fd255432..e1a5d173e56de 100644
--- a/src/bootstrap/src/core/build_steps/compile.rs
+++ b/src/bootstrap/src/core/build_steps/compile.rs
@@ -29,7 +29,7 @@ use crate::utils::helpers::{
     self, exe, get_clang_cl_resource_dir, get_closest_merge_base_commit, is_debug_info, is_dylib,
     symlink_dir, t, up_to_date,
 };
-use crate::{CLang, Compiler, DependencyType, GitRepo, Mode, LLVM_TOOLS};
+use crate::{CLang, Compiler, DependencyType, GitRepo, Mode};
 
 #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
 pub struct Std {
@@ -1912,52 +1912,20 @@ impl Step for Assemble {
         // delegates to the `rust-lld` binary for linking and then runs
         // logic to create the final binary. This is used by the
         // `wasm32-wasip2` target of Rust.
-        if builder.tool_enabled("wasm-component-ld") {
-            let wasm_component_ld_exe =
-                builder.ensure(crate::core::build_steps::tool::WasmComponentLd {
-                    compiler: build_compiler,
-                    target: target_compiler.host,
-                });
-            builder.copy_link(
-                &wasm_component_ld_exe,
-                &libdir_bin.join(wasm_component_ld_exe.file_name().unwrap()),
-            );
-        }
-
-        if builder.config.llvm_enabled(target_compiler.host) {
-            let llvm::LlvmResult { llvm_config, .. } =
-                builder.ensure(llvm::Llvm { target: target_compiler.host });
-            if !builder.config.dry_run() && builder.config.llvm_tools_enabled {
-                let llvm_bin_dir =
-                    command(llvm_config).arg("--bindir").run_capture_stdout(builder).stdout();
-                let llvm_bin_dir = Path::new(llvm_bin_dir.trim());
-
-                // Since we've already built the LLVM tools, install them to the sysroot.
-                // This is the equivalent of installing the `llvm-tools-preview` component via
-                // rustup, and lets developers use a locally built toolchain to
-                // build projects that expect llvm tools to be present in the sysroot
-                // (e.g. the `bootimage` crate).
-                for tool in LLVM_TOOLS {
-                    let tool_exe = exe(tool, target_compiler.host);
-                    let src_path = llvm_bin_dir.join(&tool_exe);
-                    // When using `download-ci-llvm`, some of the tools
-                    // may not exist, so skip trying to copy them.
-                    if src_path.exists() {
-                        builder.copy_link(&src_path, &libdir_bin.join(&tool_exe));
-                    }
-                }
-            }
-        }
+        dist::maybe_install_wasm_component_ld(
+            builder,
+            build_compiler,
+            target_compiler.host,
+            &libdir_bin,
+        );
 
-        if builder.config.llvm_bitcode_linker_enabled {
-            let src_path = builder.ensure(crate::core::build_steps::tool::LlvmBitcodeLinker {
-                compiler: build_compiler,
-                target: target_compiler.host,
-                extra_features: vec![],
-            });
-            let tool_exe = exe("llvm-bitcode-linker", target_compiler.host);
-            builder.copy_link(&src_path, &libdir_bin.join(tool_exe));
-        }
+        dist::maybe_install_llvm_tools(builder, target_compiler.host, &libdir_bin);
+        dist::maybe_install_llvm_bitcode_linker(
+            builder,
+            build_compiler,
+            target_compiler.host,
+            &libdir_bin,
+        );
 
         // Ensure that `libLLVM.so` ends up in the newly build compiler directory,
         // so that it can be found when the newly built `rustc` is run.
diff --git a/src/bootstrap/src/core/build_steps/dist.rs b/src/bootstrap/src/core/build_steps/dist.rs
index b0bd18792beb2..509668678c3a0 100644
--- a/src/bootstrap/src/core/build_steps/dist.rs
+++ b/src/bootstrap/src/core/build_steps/dist.rs
@@ -473,11 +473,10 @@ impl Step for Rustc {
                     );
                 }
             }
-            if builder.tool_enabled("wasm-component-ld") {
-                let src_dir = builder.sysroot_libdir(compiler, host).parent().unwrap().join("bin");
-                let ld = exe("wasm-component-ld", compiler.host);
-                builder.copy_link(&src_dir.join(&ld), &dst_dir.join(&ld));
-            }
+
+            maybe_install_wasm_component_ld(builder, compiler, compiler.host, &dst_dir);
+            maybe_install_llvm_tools(builder, compiler.host, &dst_dir);
+            maybe_install_llvm_bitcode_linker(builder, compiler, compiler.host, &dst_dir);
 
             // Man pages
             t!(fs::create_dir_all(image.join("share/man/man1")));
@@ -2086,6 +2085,74 @@ pub fn maybe_install_llvm_runtime(builder: &Builder<'_>, target: TargetSelection
     }
 }
 
+/// Maybe add LLVM tools to the rustc sysroot.
+pub fn maybe_install_llvm_tools(builder: &Builder<'_>, target: TargetSelection, dst_dir: &Path) {
+    if builder.config.llvm_enabled(target) {
+        let llvm::LlvmResult { llvm_config, .. } = builder.ensure(llvm::Llvm { target });
+        if !builder.config.dry_run() && builder.config.llvm_tools_enabled {
+            let llvm_bin_dir =
+                command(llvm_config).arg("--bindir").run_capture_stdout(builder).stdout();
+            let llvm_bin_dir = Path::new(llvm_bin_dir.trim());
+
+            // Since we've already built the LLVM tools, install them to the sysroot.
+            // This is the equivalent of installing the `llvm-tools-preview` component via
+            // rustup, and lets developers use a locally built toolchain to
+            // build projects that expect llvm tools to be present in the sysroot
+            // (e.g. the `bootimage` crate).
+            for tool in LLVM_TOOLS {
+                let tool_exe = exe(tool, target);
+                let src_path = llvm_bin_dir.join(&tool_exe);
+                // When using `download-ci-llvm`, some of the tools
+                // may not exist, so skip trying to copy them.
+                if src_path.exists() {
+                    builder.copy_link(&src_path, &dst_dir.join(&tool_exe));
+                }
+            }
+        }
+    }
+}
+
+/// Maybe add `llvm-bitcode-linker` to the rustc sysroot.
+pub fn maybe_install_llvm_bitcode_linker(
+    builder: &Builder<'_>,
+    compiler: Compiler,
+    target: TargetSelection,
+    dst_dir: &Path,
+) {
+    if builder.config.llvm_bitcode_linker_enabled {
+        let dst_dir = dst_dir.join("self-contained");
+        t!(std::fs::create_dir_all(&dst_dir));
+
+        let src_path = builder.ensure(crate::core::build_steps::tool::LlvmBitcodeLinker {
+            compiler,
+            target,
+            extra_features: vec![],
+        });
+
+        let tool_exe = exe("llvm-bitcode-linker", target);
+
+        builder.copy_link(&src_path, &dst_dir.join(tool_exe));
+    }
+}
+
+/// Maybe add `wasm-component-ld` to the rustc sysroot.
+pub fn maybe_install_wasm_component_ld(
+    builder: &Builder<'_>,
+    compiler: Compiler,
+    target: TargetSelection,
+    dst_dir: &Path,
+) {
+    if builder.tool_enabled("wasm-component-ld") {
+        let wasm_component_ld_exe =
+            builder.ensure(crate::core::build_steps::tool::WasmComponentLd { compiler, target });
+
+        builder.copy_link(
+            &wasm_component_ld_exe,
+            &dst_dir.join(wasm_component_ld_exe.file_name().unwrap()),
+        );
+    }
+}
+
 #[derive(Clone, Debug, Eq, Hash, PartialEq)]
 pub struct LlvmTools {
     pub target: TargetSelection,
diff --git a/src/bootstrap/src/core/config/config.rs b/src/bootstrap/src/core/config/config.rs
index de861c42c4c4e..f509712730dee 100644
--- a/src/bootstrap/src/core/config/config.rs
+++ b/src/bootstrap/src/core/config/config.rs
@@ -2766,7 +2766,8 @@ impl Config {
                     );
                 }
 
-                b
+                // If download-ci-llvm=true, we also want to check that CI LLVM is available.
+                b && llvm::is_ci_llvm_available(self, asserts)
             }
             Some(StringOrBool::String(s)) if s == "if-unchanged" => if_unchanged(),
             Some(StringOrBool::String(other)) => {
diff --git a/src/bootstrap/src/utils/change_tracker.rs b/src/bootstrap/src/utils/change_tracker.rs
index 80ab09881fe1c..99bcc6e0787ff 100644
--- a/src/bootstrap/src/utils/change_tracker.rs
+++ b/src/bootstrap/src/utils/change_tracker.rs
@@ -250,4 +250,9 @@ pub const CONFIG_CHANGE_HISTORY: &[ChangeInfo] = &[
         severity: ChangeSeverity::Info,
         summary: "New option `llvm.enzyme` to control whether the llvm based autodiff tool (Enzyme) is built.",
     },
+    ChangeInfo {
+        change_id: 129473,
+        severity: ChangeSeverity::Warning,
+        summary: "`download-ci-llvm = true` now checks whether CI LLVM is available and has become the default for the compiler profile",
+    },
 ];
diff --git a/src/tools/miri/src/concurrency/data_race.rs b/src/tools/miri/src/concurrency/data_race.rs
index 6fd207c92b937..b604fd868a02a 100644
--- a/src/tools/miri/src/concurrency/data_race.rs
+++ b/src/tools/miri/src/concurrency/data_race.rs
@@ -637,7 +637,7 @@ pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> {
         // The program didn't actually do a read, so suppress the memory access hooks.
         // This is also a very special exception where we just ignore an error -- if this read
         // was UB e.g. because the memory is uninitialized, we don't want to know!
-        let old_val = this.run_for_validation(|| this.read_scalar(dest)).ok();
+        let old_val = this.run_for_validation(|this| this.read_scalar(dest)).ok();
         this.allow_data_races_mut(move |this| this.write_scalar(val, dest))?;
         this.validate_atomic_store(dest, atomic)?;
         this.buffered_atomic_write(val, dest, atomic, old_val)
diff --git a/src/tools/miri/src/helpers.rs b/src/tools/miri/src/helpers.rs
index a546ad20fef98..cba99c0bd7a81 100644
--- a/src/tools/miri/src/helpers.rs
+++ b/src/tools/miri/src/helpers.rs
@@ -869,7 +869,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
     /// Dereference a pointer operand to a place using `layout` instead of the pointer's declared type
     fn deref_pointer_as(
         &self,
-        op: &impl Readable<'tcx, Provenance>,
+        op: &impl Projectable<'tcx, Provenance>,
         layout: TyAndLayout<'tcx>,
     ) -> InterpResult<'tcx, MPlaceTy<'tcx>> {
         let this = self.eval_context_ref();
@@ -880,7 +880,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
     /// Calculates the MPlaceTy given the offset and layout of an access on an operand
     fn deref_pointer_and_offset(
         &self,
-        op: &impl Readable<'tcx, Provenance>,
+        op: &impl Projectable<'tcx, Provenance>,
         offset: u64,
         base_layout: TyAndLayout<'tcx>,
         value_layout: TyAndLayout<'tcx>,
@@ -897,7 +897,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
 
     fn deref_pointer_and_read(
         &self,
-        op: &impl Readable<'tcx, Provenance>,
+        op: &impl Projectable<'tcx, Provenance>,
         offset: u64,
         base_layout: TyAndLayout<'tcx>,
         value_layout: TyAndLayout<'tcx>,
@@ -909,7 +909,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
 
     fn deref_pointer_and_write(
         &mut self,
-        op: &impl Readable<'tcx, Provenance>,
+        op: &impl Projectable<'tcx, Provenance>,
         offset: u64,
         value: impl Into<Scalar>,
         base_layout: TyAndLayout<'tcx>,
diff --git a/src/tools/miri/src/intrinsics/mod.rs b/src/tools/miri/src/intrinsics/mod.rs
index 18b22827bdb15..0ab1b9dfb61e9 100644
--- a/src/tools/miri/src/intrinsics/mod.rs
+++ b/src/tools/miri/src/intrinsics/mod.rs
@@ -152,8 +152,10 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
             // ```
             // Would not be considered UB, or the other way around (`is_val_statically_known(0)`).
             "is_val_statically_known" => {
-                let [arg] = check_arg_count(args)?;
-                this.validate_operand(arg, /*recursive*/ false)?;
+                let [_arg] = check_arg_count(args)?;
+                // FIXME: should we check for validity here? It's tricky because we do not have a
+                // place. Codegen does not seem to set any attributes like `noundef` for intrinsic
+                // calls, so we don't *have* to do anything.
                 let branch: bool = this.machine.rng.get_mut().gen();
                 this.write_scalar(Scalar::from_bool(branch), dest)?;
             }
diff --git a/src/tools/miri/src/machine.rs b/src/tools/miri/src/machine.rs
index 2cd57e72871ba..76b4366476d52 100644
--- a/src/tools/miri/src/machine.rs
+++ b/src/tools/miri/src/machine.rs
@@ -572,6 +572,9 @@ pub struct MiriMachine<'tcx> {
     /// Invariant: the promised alignment will never be less than the native alignment of the
     /// allocation.
     pub(crate) symbolic_alignment: RefCell<FxHashMap<AllocId, (Size, Align)>>,
+
+    /// A cache of "data range" computations for unions (i.e., the offsets of non-padding bytes).
+    union_data_ranges: FxHashMap<Ty<'tcx>, RangeSet>,
 }
 
 impl<'tcx> MiriMachine<'tcx> {
@@ -714,6 +717,7 @@ impl<'tcx> MiriMachine<'tcx> {
             allocation_spans: RefCell::new(FxHashMap::default()),
             const_cache: RefCell::new(FxHashMap::default()),
             symbolic_alignment: RefCell::new(FxHashMap::default()),
+            union_data_ranges: FxHashMap::default(),
         }
     }
 
@@ -826,6 +830,7 @@ impl VisitProvenance for MiriMachine<'_> {
             allocation_spans: _,
             const_cache: _,
             symbolic_alignment: _,
+            union_data_ranges: _,
         } = self;
 
         threads.visit_provenance(visit);
@@ -1627,4 +1632,12 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
             ecx.machine.rng.borrow_mut().gen::<usize>() % ADDRS_PER_ANON_GLOBAL
         }
     }
+
+    fn cached_union_data_range<'e>(
+        ecx: &'e mut InterpCx<'tcx, Self>,
+        ty: Ty<'tcx>,
+        compute_range: impl FnOnce() -> RangeSet,
+    ) -> Cow<'e, RangeSet> {
+        Cow::Borrowed(ecx.machine.union_data_ranges.entry(ty).or_insert_with(compute_range))
+    }
 }
diff --git a/src/tools/miri/tests/fail/provenance/int_copy_looses_provenance0.rs b/src/tools/miri/tests/fail/provenance/int_copy_looses_provenance0.rs
new file mode 100644
index 0000000000000..fd0773ed916b7
--- /dev/null
+++ b/src/tools/miri/tests/fail/provenance/int_copy_looses_provenance0.rs
@@ -0,0 +1,10 @@
+use std::mem;
+
+// Doing a copy at integer type should lose provenance.
+// This tests the unoptimized base case.
+fn main() {
+    let ptrs = [(&42, true)];
+    let ints: [(usize, bool); 1] = unsafe { mem::transmute(ptrs) };
+    let ptr = (&raw const ints[0].0).cast::<&i32>();
+    let _val = unsafe { *ptr.read() }; //~ERROR: dangling
+}
diff --git a/src/tools/miri/tests/fail/provenance/int_copy_looses_provenance0.stderr b/src/tools/miri/tests/fail/provenance/int_copy_looses_provenance0.stderr
new file mode 100644
index 0000000000000..fc012af3ad877
--- /dev/null
+++ b/src/tools/miri/tests/fail/provenance/int_copy_looses_provenance0.stderr
@@ -0,0 +1,15 @@
+error: Undefined Behavior: constructing invalid value: encountered a dangling reference ($HEX[noalloc] has no provenance)
+  --> $DIR/int_copy_looses_provenance0.rs:LL:CC
+   |
+LL |     let _val = unsafe { *ptr.read() };
+   |                          ^^^^^^^^^^ constructing invalid value: encountered a dangling reference ($HEX[noalloc] has no provenance)
+   |
+   = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
+   = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
+   = note: BACKTRACE:
+   = note: inside `main` at $DIR/int_copy_looses_provenance0.rs:LL:CC
+
+note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
+
+error: aborting due to 1 previous error
+
diff --git a/src/tools/miri/tests/fail/provenance/int_copy_looses_provenance1.rs b/src/tools/miri/tests/fail/provenance/int_copy_looses_provenance1.rs
new file mode 100644
index 0000000000000..ce64dcc1a07c4
--- /dev/null
+++ b/src/tools/miri/tests/fail/provenance/int_copy_looses_provenance1.rs
@@ -0,0 +1,10 @@
+use std::mem;
+
+// Doing a copy at integer type should lose provenance.
+// This tests the optimized-array case of integer copies.
+fn main() {
+    let ptrs = [&42];
+    let ints: [usize; 1] = unsafe { mem::transmute(ptrs) };
+    let ptr = (&raw const ints[0]).cast::<&i32>();
+    let _val = unsafe { *ptr.read() }; //~ERROR: dangling
+}
diff --git a/src/tools/miri/tests/fail/provenance/int_copy_looses_provenance1.stderr b/src/tools/miri/tests/fail/provenance/int_copy_looses_provenance1.stderr
new file mode 100644
index 0000000000000..375262655d0cf
--- /dev/null
+++ b/src/tools/miri/tests/fail/provenance/int_copy_looses_provenance1.stderr
@@ -0,0 +1,15 @@
+error: Undefined Behavior: constructing invalid value: encountered a dangling reference ($HEX[noalloc] has no provenance)
+  --> $DIR/int_copy_looses_provenance1.rs:LL:CC
+   |
+LL |     let _val = unsafe { *ptr.read() };
+   |                          ^^^^^^^^^^ constructing invalid value: encountered a dangling reference ($HEX[noalloc] has no provenance)
+   |
+   = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
+   = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
+   = note: BACKTRACE:
+   = note: inside `main` at $DIR/int_copy_looses_provenance1.rs:LL:CC
+
+note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
+
+error: aborting due to 1 previous error
+
diff --git a/src/tools/miri/tests/fail/provenance/int_copy_looses_provenance2.rs b/src/tools/miri/tests/fail/provenance/int_copy_looses_provenance2.rs
new file mode 100644
index 0000000000000..e8966c53d7059
--- /dev/null
+++ b/src/tools/miri/tests/fail/provenance/int_copy_looses_provenance2.rs
@@ -0,0 +1,12 @@
+use std::mem;
+
+// Doing a copy at integer type should lose provenance.
+// This tests the case where provenance is hiding in the metadata of a pointer.
+fn main() {
+    let ptrs = [(&42, &42)];
+    // Typed copy at wide pointer type (with integer-typed metadata).
+    let ints: [*const [usize]; 1] = unsafe { mem::transmute(ptrs) };
+    // Get a pointer to the metadata field.
+    let ptr = (&raw const ints[0]).wrapping_byte_add(mem::size_of::<*const ()>()).cast::<&i32>();
+    let _val = unsafe { *ptr.read() }; //~ERROR: dangling
+}
diff --git a/src/tools/miri/tests/fail/provenance/int_copy_looses_provenance2.stderr b/src/tools/miri/tests/fail/provenance/int_copy_looses_provenance2.stderr
new file mode 100644
index 0000000000000..8402c7b5e130f
--- /dev/null
+++ b/src/tools/miri/tests/fail/provenance/int_copy_looses_provenance2.stderr
@@ -0,0 +1,15 @@
+error: Undefined Behavior: constructing invalid value: encountered a dangling reference ($HEX[noalloc] has no provenance)
+  --> $DIR/int_copy_looses_provenance2.rs:LL:CC
+   |
+LL |     let _val = unsafe { *ptr.read() };
+   |                          ^^^^^^^^^^ constructing invalid value: encountered a dangling reference ($HEX[noalloc] has no provenance)
+   |
+   = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
+   = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
+   = note: BACKTRACE:
+   = note: inside `main` at $DIR/int_copy_looses_provenance2.rs:LL:CC
+
+note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
+
+error: aborting due to 1 previous error
+
diff --git a/src/tools/miri/tests/fail/provenance/int_copy_looses_provenance3.rs b/src/tools/miri/tests/fail/provenance/int_copy_looses_provenance3.rs
new file mode 100644
index 0000000000000..48a48ce4587ee
--- /dev/null
+++ b/src/tools/miri/tests/fail/provenance/int_copy_looses_provenance3.rs
@@ -0,0 +1,29 @@
+#![feature(strict_provenance)]
+use std::mem;
+
+#[repr(C, usize)]
+#[allow(unused)]
+enum E {
+    Var1(usize),
+    Var2(usize),
+}
+
+// Doing a copy at integer type should lose provenance.
+// This tests the case where provenance is hiding in the discriminant of an enum.
+fn main() {
+    assert_eq!(mem::size_of::<E>(), 2 * mem::size_of::<usize>());
+
+    // We want to store provenance in the enum discriminant, but the value still needs to
+    // be valid for the type. So we split provenance and data.
+    let ptr = &42;
+    let ptr = ptr as *const i32;
+    let ptrs = [(ptr.with_addr(0), ptr)];
+    // Typed copy at the enum type.
+    let ints: [E; 1] = unsafe { mem::transmute(ptrs) };
+    // Read the discriminant.
+    let discr = unsafe { (&raw const ints[0]).cast::<*const i32>().read() };
+    // Take the provenance from there, together with the original address.
+    let ptr = discr.with_addr(ptr.addr());
+    // There should be no provenance in `discr`, so this should be UB.
+    let _val = unsafe { *ptr }; //~ERROR: dangling
+}
diff --git a/src/tools/miri/tests/fail/provenance/int_copy_looses_provenance3.stderr b/src/tools/miri/tests/fail/provenance/int_copy_looses_provenance3.stderr
new file mode 100644
index 0000000000000..b50e23da96a7e
--- /dev/null
+++ b/src/tools/miri/tests/fail/provenance/int_copy_looses_provenance3.stderr
@@ -0,0 +1,15 @@
+error: Undefined Behavior: memory access failed: expected a pointer to 4 bytes of memory, but got $HEX[noalloc] which is a dangling pointer (it has no provenance)
+  --> $DIR/int_copy_looses_provenance3.rs:LL:CC
+   |
+LL |     let _val = unsafe { *ptr };
+   |                         ^^^^ memory access failed: expected a pointer to 4 bytes of memory, but got $HEX[noalloc] which is a dangling pointer (it has no provenance)
+   |
+   = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
+   = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
+   = note: BACKTRACE:
+   = note: inside `main` at $DIR/int_copy_looses_provenance3.rs:LL:CC
+
+note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
+
+error: aborting due to 1 previous error
+
diff --git a/src/tools/miri/tests/fail/provenance/ptr_copy_loses_partial_provenance0.rs b/src/tools/miri/tests/fail/provenance/ptr_copy_loses_partial_provenance0.rs
new file mode 100644
index 0000000000000..ff94f2263c517
--- /dev/null
+++ b/src/tools/miri/tests/fail/provenance/ptr_copy_loses_partial_provenance0.rs
@@ -0,0 +1,18 @@
+fn main() {
+    let half_ptr = std::mem::size_of::<*const ()>() / 2;
+    let mut bytes = [1u8; 16];
+    let bytes = bytes.as_mut_ptr();
+
+    unsafe {
+        // Put a pointer in the middle.
+        bytes.add(half_ptr).cast::<&i32>().write_unaligned(&42);
+        // Typed copy of the entire thing as two pointers, but not perfectly
+        // overlapping with the pointer we have in there.
+        let copy = bytes.cast::<[*const (); 2]>().read_unaligned();
+        let copy_bytes = copy.as_ptr().cast::<u8>();
+        // Now go to the middle of the copy and get the pointer back out.
+        let ptr = copy_bytes.add(half_ptr).cast::<*const i32>().read_unaligned();
+        // Dereferencing this should fail as the copy has removed the provenance.
+        let _val = *ptr; //~ERROR: dangling
+    }
+}
diff --git a/src/tools/miri/tests/fail/provenance/ptr_copy_loses_partial_provenance0.stderr b/src/tools/miri/tests/fail/provenance/ptr_copy_loses_partial_provenance0.stderr
new file mode 100644
index 0000000000000..ed38572a5f398
--- /dev/null
+++ b/src/tools/miri/tests/fail/provenance/ptr_copy_loses_partial_provenance0.stderr
@@ -0,0 +1,15 @@
+error: Undefined Behavior: memory access failed: expected a pointer to 4 bytes of memory, but got $HEX[noalloc] which is a dangling pointer (it has no provenance)
+  --> $DIR/ptr_copy_loses_partial_provenance0.rs:LL:CC
+   |
+LL |         let _val = *ptr;
+   |                    ^^^^ memory access failed: expected a pointer to 4 bytes of memory, but got $HEX[noalloc] which is a dangling pointer (it has no provenance)
+   |
+   = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
+   = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
+   = note: BACKTRACE:
+   = note: inside `main` at $DIR/ptr_copy_loses_partial_provenance0.rs:LL:CC
+
+note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
+
+error: aborting due to 1 previous error
+
diff --git a/src/tools/miri/tests/fail/provenance/ptr_copy_loses_partial_provenance1.rs b/src/tools/miri/tests/fail/provenance/ptr_copy_loses_partial_provenance1.rs
new file mode 100644
index 0000000000000..d0e3dac7792b0
--- /dev/null
+++ b/src/tools/miri/tests/fail/provenance/ptr_copy_loses_partial_provenance1.rs
@@ -0,0 +1,18 @@
+fn main() {
+    let half_ptr = std::mem::size_of::<*const ()>() / 2;
+    let mut bytes = [1u8; 16];
+    let bytes = bytes.as_mut_ptr();
+
+    unsafe {
+        // Put a pointer in the middle.
+        bytes.add(half_ptr).cast::<&i32>().write_unaligned(&42);
+        // Typed copy of the entire thing as two *function* pointers, but not perfectly
+        // overlapping with the pointer we have in there.
+        let copy = bytes.cast::<[fn(); 2]>().read_unaligned();
+        let copy_bytes = copy.as_ptr().cast::<u8>();
+        // Now go to the middle of the copy and get the pointer back out.
+        let ptr = copy_bytes.add(half_ptr).cast::<*const i32>().read_unaligned();
+        // Dereferencing this should fail as the copy has removed the provenance.
+        let _val = *ptr; //~ERROR: dangling
+    }
+}
diff --git a/src/tools/miri/tests/fail/provenance/ptr_copy_loses_partial_provenance1.stderr b/src/tools/miri/tests/fail/provenance/ptr_copy_loses_partial_provenance1.stderr
new file mode 100644
index 0000000000000..2e11687175afe
--- /dev/null
+++ b/src/tools/miri/tests/fail/provenance/ptr_copy_loses_partial_provenance1.stderr
@@ -0,0 +1,15 @@
+error: Undefined Behavior: memory access failed: expected a pointer to 4 bytes of memory, but got $HEX[noalloc] which is a dangling pointer (it has no provenance)
+  --> $DIR/ptr_copy_loses_partial_provenance1.rs:LL:CC
+   |
+LL |         let _val = *ptr;
+   |                    ^^^^ memory access failed: expected a pointer to 4 bytes of memory, but got $HEX[noalloc] which is a dangling pointer (it has no provenance)
+   |
+   = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
+   = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
+   = note: BACKTRACE:
+   = note: inside `main` at $DIR/ptr_copy_loses_partial_provenance1.rs:LL:CC
+
+note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
+
+error: aborting due to 1 previous error
+
diff --git a/src/tools/miri/tests/fail/uninit/padding-enum.rs b/src/tools/miri/tests/fail/uninit/padding-enum.rs
new file mode 100644
index 0000000000000..3852ac5c477d5
--- /dev/null
+++ b/src/tools/miri/tests/fail/uninit/padding-enum.rs
@@ -0,0 +1,23 @@
+use std::mem;
+
+// We have three fields to avoid the ScalarPair optimization.
+#[allow(unused)]
+enum E {
+    None,
+    Some(&'static (), &'static (), usize),
+}
+
+fn main() { unsafe {
+    let mut p: mem::MaybeUninit<E> = mem::MaybeUninit::zeroed();
+    // The copy when `E` is returned from `transmute` should destroy padding
+    // (even when we use `write_unaligned`, which under the hood uses an untyped copy).
+    p.as_mut_ptr().write_unaligned(mem::transmute((0usize, 0usize, 0usize)));
+    // This is a `None`, so everything but the discriminant is padding.
+    assert!(matches!(*p.as_ptr(), E::None));
+
+    // Turns out the discriminant is (currently) stored
+    // in the 2nd pointer, so the first half is padding.
+    let c = &p as *const _ as *const u8;
+    let _val = *c.add(0); // Get a padding byte.
+    //~^ERROR: uninitialized
+} }
diff --git a/src/tools/miri/tests/fail/uninit/padding-enum.stderr b/src/tools/miri/tests/fail/uninit/padding-enum.stderr
new file mode 100644
index 0000000000000..c571f18874076
--- /dev/null
+++ b/src/tools/miri/tests/fail/uninit/padding-enum.stderr
@@ -0,0 +1,15 @@
+error: Undefined Behavior: using uninitialized data, but this operation requires initialized memory
+  --> $DIR/padding-enum.rs:LL:CC
+   |
+LL |     let _val = *c.add(0); // Get a padding byte.
+   |                ^^^^^^^^^ using uninitialized data, but this operation requires initialized memory
+   |
+   = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
+   = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
+   = note: BACKTRACE:
+   = note: inside `main` at $DIR/padding-enum.rs:LL:CC
+
+note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
+
+error: aborting due to 1 previous error
+
diff --git a/src/tools/miri/tests/fail/uninit/padding-pair.rs b/src/tools/miri/tests/fail/uninit/padding-pair.rs
new file mode 100644
index 0000000000000..c8c00b3c65a06
--- /dev/null
+++ b/src/tools/miri/tests/fail/uninit/padding-pair.rs
@@ -0,0 +1,25 @@
+#![feature(core_intrinsics)]
+
+use std::mem::{self, MaybeUninit};
+
+fn main() {
+    // This constructs a `(usize, bool)` pair: 9 bytes initialized, the rest not.
+    // Ensure that these 9 bytes are indeed initialized, and the rest is indeed not.
+    // This should be the case even if we write into previously initialized storage.
+    let mut x: MaybeUninit<Box<[u8]>> = MaybeUninit::zeroed();
+    let z = std::intrinsics::add_with_overflow(0usize, 0usize);
+    unsafe { x.as_mut_ptr().cast::<(usize, bool)>().write(z) };
+    // Now read this bytewise. There should be (`ptr_size + 1`) def bytes followed by
+    // (`ptr_size - 1`) undef bytes (the padding after the bool) in there.
+    let z: *const u8 = &x as *const _ as *const _;
+    let first_undef = mem::size_of::<usize>() as isize + 1;
+    for i in 0..first_undef {
+        let byte = unsafe { *z.offset(i) };
+        assert_eq!(byte, 0);
+    }
+    let v = unsafe { *z.offset(first_undef) };
+    //~^ ERROR: uninitialized
+    if v == 0 {
+        println!("it is zero");
+    }
+}
diff --git a/src/tools/miri/tests/fail/uninit/padding-pair.stderr b/src/tools/miri/tests/fail/uninit/padding-pair.stderr
new file mode 100644
index 0000000000000..d35934d83d58f
--- /dev/null
+++ b/src/tools/miri/tests/fail/uninit/padding-pair.stderr
@@ -0,0 +1,15 @@
+error: Undefined Behavior: using uninitialized data, but this operation requires initialized memory
+  --> $DIR/padding-pair.rs:LL:CC
+   |
+LL |     let v = unsafe { *z.offset(first_undef) };
+   |                      ^^^^^^^^^^^^^^^^^^^^^^ using uninitialized data, but this operation requires initialized memory
+   |
+   = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
+   = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
+   = note: BACKTRACE:
+   = note: inside `main` at $DIR/padding-pair.rs:LL:CC
+
+note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
+
+error: aborting due to 1 previous error
+
diff --git a/src/tools/miri/tests/fail/uninit/padding-struct-in-union.rs b/src/tools/miri/tests/fail/uninit/padding-struct-in-union.rs
new file mode 100644
index 0000000000000..132b85828362d
--- /dev/null
+++ b/src/tools/miri/tests/fail/uninit/padding-struct-in-union.rs
@@ -0,0 +1,32 @@
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+struct Foo {
+    val16: u16,
+    // Padding bytes go here!
+    val32: u32,
+}
+
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+struct Bar {
+    bytes: [u8; 8],
+}
+
+#[repr(C)]
+union FooBar {
+    foo: Foo,
+    bar: Bar,
+}
+
+pub fn main() {
+    // Initialize as u8 to ensure padding bytes are zeroed.
+    let mut foobar = FooBar { bar: Bar { bytes: [0u8; 8] } };
+    // Reading either field is ok.
+    let _val = unsafe { (foobar.foo, foobar.bar) };
+    // Does this assignment copy the uninitialized padding bytes
+    // over the previously initialized padding bytes? Miri says it does:
+    foobar.foo = Foo { val16: 1, val32: 2 };
+    // This resets the padding to uninit.
+    let _val = unsafe { (foobar.foo, foobar.bar) };
+    //~^ ERROR: uninitialized
+}
diff --git a/src/tools/miri/tests/fail/uninit/padding-struct-in-union.stderr b/src/tools/miri/tests/fail/uninit/padding-struct-in-union.stderr
new file mode 100644
index 0000000000000..e122249af16e5
--- /dev/null
+++ b/src/tools/miri/tests/fail/uninit/padding-struct-in-union.stderr
@@ -0,0 +1,15 @@
+error: Undefined Behavior: constructing invalid value at .bytes[2]: encountered uninitialized memory, but expected an integer
+  --> $DIR/padding-struct-in-union.rs:LL:CC
+   |
+LL |     let _val = unsafe { (foobar.foo, foobar.bar) };
+   |                                      ^^^^^^^^^^ constructing invalid value at .bytes[2]: encountered uninitialized memory, but expected an integer
+   |
+   = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
+   = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
+   = note: BACKTRACE:
+   = note: inside `main` at $DIR/padding-struct-in-union.rs:LL:CC
+
+note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
+
+error: aborting due to 1 previous error
+
diff --git a/src/tools/miri/tests/fail/uninit/padding-struct.rs b/src/tools/miri/tests/fail/uninit/padding-struct.rs
new file mode 100644
index 0000000000000..dd3be50343902
--- /dev/null
+++ b/src/tools/miri/tests/fail/uninit/padding-struct.rs
@@ -0,0 +1,11 @@
+use std::mem;
+
+#[repr(C)]
+struct Pair(u8, u16);
+
+fn main() { unsafe {
+    let p: Pair = mem::transmute(0u32); // The copy when `Pair` is returned from `transmute` should destroy padding.
+    let c = &p as *const _ as *const u8;
+    let _val = *c.add(1); // Get the padding byte.
+    //~^ERROR: uninitialized
+} }
diff --git a/src/tools/miri/tests/fail/uninit/padding-struct.stderr b/src/tools/miri/tests/fail/uninit/padding-struct.stderr
new file mode 100644
index 0000000000000..8dc40a482ac52
--- /dev/null
+++ b/src/tools/miri/tests/fail/uninit/padding-struct.stderr
@@ -0,0 +1,15 @@
+error: Undefined Behavior: using uninitialized data, but this operation requires initialized memory
+  --> $DIR/padding-struct.rs:LL:CC
+   |
+LL |     let _val = *c.add(1); // Get the padding byte.
+   |                ^^^^^^^^^ using uninitialized data, but this operation requires initialized memory
+   |
+   = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
+   = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
+   = note: BACKTRACE:
+   = note: inside `main` at $DIR/padding-struct.rs:LL:CC
+
+note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
+
+error: aborting due to 1 previous error
+
diff --git a/src/tools/miri/tests/fail/uninit/padding-union.rs b/src/tools/miri/tests/fail/uninit/padding-union.rs
new file mode 100644
index 0000000000000..2e9e0a40d6c68
--- /dev/null
+++ b/src/tools/miri/tests/fail/uninit/padding-union.rs
@@ -0,0 +1,14 @@
+use std::mem;
+
+#[allow(unused)]
+#[repr(C)]
+union U {
+    field: (u8, u16),
+}
+
+fn main() { unsafe {
+    let p: U = mem::transmute(0u32); // The copy when `U` is returned from `transmute` should destroy padding.
+    let c = &p as *const _ as *const [u8; 4];
+    let _val = *c; // Read the entire thing, definitely contains the padding byte.
+    //~^ERROR: uninitialized
+} }
diff --git a/src/tools/miri/tests/fail/uninit/padding-union.stderr b/src/tools/miri/tests/fail/uninit/padding-union.stderr
new file mode 100644
index 0000000000000..04002da4f195c
--- /dev/null
+++ b/src/tools/miri/tests/fail/uninit/padding-union.stderr
@@ -0,0 +1,15 @@
+error: Undefined Behavior: constructing invalid value at [1]: encountered uninitialized memory, but expected an integer
+  --> $DIR/padding-union.rs:LL:CC
+   |
+LL |     let _val = *c; // Read the entire thing, definitely contains the padding byte.
+   |                ^^ constructing invalid value at [1]: encountered uninitialized memory, but expected an integer
+   |
+   = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
+   = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
+   = note: BACKTRACE:
+   = note: inside `main` at $DIR/padding-union.rs:LL:CC
+
+note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
+
+error: aborting due to 1 previous error
+
diff --git a/src/tools/miri/tests/fail/uninit/padding-wide-ptr.rs b/src/tools/miri/tests/fail/uninit/padding-wide-ptr.rs
new file mode 100644
index 0000000000000..0403a9caba66d
--- /dev/null
+++ b/src/tools/miri/tests/fail/uninit/padding-wide-ptr.rs
@@ -0,0 +1,18 @@
+use std::mem;
+
+// If this is `None`, the metadata becomes padding.
+type T = Option<&'static str>;
+
+fn main() { unsafe {
+    let mut p: mem::MaybeUninit<T> = mem::MaybeUninit::zeroed();
+    // The copy when `T` is returned from `transmute` should destroy padding
+    // (even when we use `write_unaligned`, which under the hood uses an untyped copy).
+    p.as_mut_ptr().write_unaligned(mem::transmute((0usize, 0usize)));
+    // Null represents `None`.
+    assert!(matches!(*p.as_ptr(), None));
+
+    // The second part, with the length, becomes padding.
+    let c = &p as *const _ as *const u8;
+    let _val = *c.add(mem::size_of::<*const u8>()); // Get a padding byte.
+    //~^ERROR: uninitialized
+} }
diff --git a/src/tools/miri/tests/fail/uninit/padding-wide-ptr.stderr b/src/tools/miri/tests/fail/uninit/padding-wide-ptr.stderr
new file mode 100644
index 0000000000000..0da72550b2e08
--- /dev/null
+++ b/src/tools/miri/tests/fail/uninit/padding-wide-ptr.stderr
@@ -0,0 +1,15 @@
+error: Undefined Behavior: using uninitialized data, but this operation requires initialized memory
+  --> $DIR/padding-wide-ptr.rs:LL:CC
+   |
+LL |     let _val = *c.add(mem::size_of::<*const u8>()); // Get a padding byte.
+   |                ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ using uninitialized data, but this operation requires initialized memory
+   |
+   = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
+   = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
+   = note: BACKTRACE:
+   = note: inside `main` at $DIR/padding-wide-ptr.rs:LL:CC
+
+note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
+
+error: aborting due to 1 previous error
+
diff --git a/src/tools/miri/tests/fail/transmute-pair-uninit.rs b/src/tools/miri/tests/fail/uninit/transmute-pair-uninit.rs
similarity index 61%
rename from src/tools/miri/tests/fail/transmute-pair-uninit.rs
rename to src/tools/miri/tests/fail/uninit/transmute-pair-uninit.rs
index bc95f3cb7ad3a..0ba5520a54468 100644
--- a/src/tools/miri/tests/fail/transmute-pair-uninit.rs
+++ b/src/tools/miri/tests/fail/uninit/transmute-pair-uninit.rs
@@ -1,16 +1,17 @@
 #![feature(core_intrinsics)]
 
-use std::mem;
+use std::mem::{self, MaybeUninit};
 
 fn main() {
-    let x: Option<Box<[u8]>> = unsafe {
+    // This constructs a `(usize, bool)` pair: 9 bytes initialized, the rest not.
+    // Ensure that these 9 bytes are indeed initialized, and the rest is indeed not.
+    let x: MaybeUninit<Box<[u8]>> = unsafe {
         let z = std::intrinsics::add_with_overflow(0usize, 0usize);
-        std::mem::transmute::<(usize, bool), Option<Box<[u8]>>>(z)
+        std::mem::transmute::<(usize, bool), MaybeUninit<Box<[u8]>>>(z)
     };
-    let y = &x;
     // Now read this bytewise. There should be (`ptr_size + 1`) def bytes followed by
     // (`ptr_size - 1`) undef bytes (the padding after the bool) in there.
-    let z: *const u8 = y as *const _ as *const _;
+    let z: *const u8 = &x as *const _ as *const _;
     let first_undef = mem::size_of::<usize>() as isize + 1;
     for i in 0..first_undef {
         let byte = unsafe { *z.offset(i) };
diff --git a/src/tools/miri/tests/fail/transmute-pair-uninit.stderr b/src/tools/miri/tests/fail/uninit/transmute-pair-uninit.stderr
similarity index 100%
rename from src/tools/miri/tests/fail/transmute-pair-uninit.stderr
rename to src/tools/miri/tests/fail/uninit/transmute-pair-uninit.stderr
diff --git a/src/tools/miri/tests/pass/arrays.rs b/src/tools/miri/tests/pass/arrays.rs
index 61b44453e9bd9..b0c6f54cab87c 100644
--- a/src/tools/miri/tests/pass/arrays.rs
+++ b/src/tools/miri/tests/pass/arrays.rs
@@ -61,6 +61,20 @@ fn debug() {
     println!("{:?}", array);
 }
 
+fn huge_zst() {
+    fn id<T>(x: T) -> T { x }
+
+    // A "huge" zero-sized array. Make sure we don't loop over it in any part of Miri.
+    let val = [(); usize::MAX];
+    id(val); // make a copy
+
+    let val = [val; 2];
+    id(val);
+
+    // Also wrap it in a union (which, in particular, hits the logic for computing union padding).
+    let _copy = std::mem::MaybeUninit::new(val);
+}
+
 fn main() {
     assert_eq!(empty_array(), []);
     assert_eq!(index_unsafe(), 20);
@@ -73,4 +87,5 @@ fn main() {
     from();
     eq();
     debug();
+    huge_zst();
 }
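
`huge_zst` relies on the array of unit values being itself zero-sized, so copying it (or wrapping it in `MaybeUninit`) never has to touch per-element memory. A minimal sketch of that size arithmetic:

```rust
use std::mem::size_of;

fn main() {
    // usize::MAX elements of size 0 still occupy 0 bytes, so copies are trivially cheap.
    assert_eq!(size_of::<[(); usize::MAX]>(), 0);
    assert_eq!(size_of::<[[(); usize::MAX]; 2]>(), 0);
}
```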
diff --git a/src/tools/miri/tests/pass/enums.rs b/src/tools/miri/tests/pass/enums.rs
index 1dafef025e958..9fc61f07c047f 100644
--- a/src/tools/miri/tests/pass/enums.rs
+++ b/src/tools/miri/tests/pass/enums.rs
@@ -132,6 +132,43 @@ fn overaligned_casts() {
     assert_eq!(aligned as u8, 0);
 }
 
+// This hits a corner case in the logic for clearing padding on typed copies.
+fn padding_clear_corner_case() {
+    #[allow(unused)]
+    #[derive(Copy, Clone)]
+    #[repr(C)]
+    pub struct Decoded {
+        /// The scaled mantissa.
+        pub mant: u64,
+        /// The lower error range.
+        pub minus: u64,
+        /// The upper error range.
+        pub plus: u64,
+        /// The shared exponent in base 2.
+        pub exp: i16,
+        /// True when the error range is inclusive.
+        ///
+        /// In IEEE 754, this is true when the original mantissa was even.
+        pub inclusive: bool,
+    }
+
+    #[allow(unused)]
+    #[derive(Copy, Clone)]
+    pub enum FullDecoded {
+        /// Not-a-number.
+        Nan,
+        /// Infinities, either positive or negative.
+        Infinite,
+        /// Zero, either positive or negative.
+        Zero,
+        /// Finite numbers with further decoded fields.
+        Finite(Decoded),
+    }
+
+    let val = FullDecoded::Finite(Decoded { mant: 0, minus: 0, plus: 0, exp: 0, inclusive: false });
+    let _val2 = val; // trigger typed copy
+}
+
 fn main() {
     test(MyEnum::MyEmptyVariant);
     test(MyEnum::MyNewtypeVariant(42));
@@ -141,4 +178,5 @@ fn main() {
     discriminant_overflow();
     more_discriminant_overflow();
     overaligned_casts();
+    padding_clear_corner_case();
 }
diff --git a/src/tools/miri/tests/pass/provenance.rs b/src/tools/miri/tests/pass/provenance.rs
index 9e8a9651b3d96..2e4d240cc48a1 100644
--- a/src/tools/miri/tests/pass/provenance.rs
+++ b/src/tools/miri/tests/pass/provenance.rs
@@ -12,6 +12,7 @@ fn main() {
     bytewise_custom_memcpy();
     bytewise_custom_memcpy_chunked();
     int_load_strip_provenance();
+    maybe_uninit_preserves_partial_provenance();
 }
 
 /// Some basic smoke tests for provenance.
@@ -145,3 +146,24 @@ fn int_load_strip_provenance() {
     let ints: [usize; 1] = unsafe { mem::transmute(ptrs) };
     assert_eq!(ptrs[0] as *const _ as usize, ints[0]);
 }
+
+fn maybe_uninit_preserves_partial_provenance() {
+    // This is the same test as ptr_copy_loses_partial_provenance.rs, but using MaybeUninit and thus
+    // properly preserving partial provenance.
+    unsafe {
+        let mut bytes = [1u8; 16];
+        let bytes = bytes.as_mut_ptr();
+
+        // Put a pointer in the middle.
+        bytes.add(4).cast::<&i32>().write_unaligned(&42);
+        // Copy the entire thing as two pointers but not perfectly
+        // overlapping with the pointer we have in there.
+        let copy = bytes.cast::<[mem::MaybeUninit<*const ()>; 2]>().read_unaligned();
+        let copy_bytes = copy.as_ptr().cast::<u8>();
+        // Now go to the middle of the copy and get the pointer back out.
+        let ptr = copy_bytes.add(4).cast::<*const i32>().read_unaligned();
+        // And deref this to ensure we get the right value.
+        let val = *ptr;
+        assert_eq!(val, 42);
+    }
+}
diff --git a/src/tools/run-make-support/src/external_deps/cargo.rs b/src/tools/run-make-support/src/external_deps/cargo.rs
new file mode 100644
index 0000000000000..b0e045dc80bf8
--- /dev/null
+++ b/src/tools/run-make-support/src/external_deps/cargo.rs
@@ -0,0 +1,7 @@
+use crate::command::Command;
+use crate::env_var;
+
+/// Returns a command that can be used to invoke Cargo.
+pub fn cargo() -> Command {
+    Command::new(env_var("BOOTSTRAP_CARGO"))
+}
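
A hedged usage sketch for the new `cargo()` helper, mirroring how the `rustc-crates-on-stable` test further down drives it; the `BOOTSTRAP_CARGO` environment variable is assumed to be provided by the run-make harness:

```rust
// Illustrative rmake.rs snippet; assumes the run-make harness sets BOOTSTRAP_CARGO.
use run_make_support::cargo;

fn main() {
    // Build the crate next to the test with the bootstrap cargo.
    cargo().arg("build").arg("--manifest-path").arg("Cargo.toml").run();
}
```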
diff --git a/src/tools/run-make-support/src/external_deps/mod.rs b/src/tools/run-make-support/src/external_deps/mod.rs
index f7c84724d0e07..80c34a9070fcc 100644
--- a/src/tools/run-make-support/src/external_deps/mod.rs
+++ b/src/tools/run-make-support/src/external_deps/mod.rs
@@ -2,6 +2,7 @@
 //! such as `cc` or `python`.
 
 pub mod c_build;
+pub mod cargo;
 pub mod cc;
 pub mod clang;
 pub mod htmldocck;
diff --git a/src/tools/run-make-support/src/external_deps/rustc.rs b/src/tools/run-make-support/src/external_deps/rustc.rs
index f60ea972839c1..35d983dc607fd 100644
--- a/src/tools/run-make-support/src/external_deps/rustc.rs
+++ b/src/tools/run-make-support/src/external_deps/rustc.rs
@@ -36,10 +36,13 @@ pub struct Rustc {
 
 crate::macros::impl_common_helpers!(Rustc);
 
+pub fn rustc_path() -> String {
+    env_var("RUSTC")
+}
+
 #[track_caller]
 fn setup_common() -> Command {
-    let rustc = env_var("RUSTC");
-    let mut cmd = Command::new(rustc);
+    let mut cmd = Command::new(rustc_path());
     set_host_rpath(&mut cmd);
     cmd
 }
diff --git a/src/tools/run-make-support/src/lib.rs b/src/tools/run-make-support/src/lib.rs
index 980bd37dca8ae..15d813ccf5300 100644
--- a/src/tools/run-make-support/src/lib.rs
+++ b/src/tools/run-make-support/src/lib.rs
@@ -50,6 +50,7 @@ pub use external_deps::{c_build, cc, clang, htmldocck, llvm, python, rustc, rust
 // These rely on external dependencies.
 pub use cc::{cc, cxx, extra_c_flags, extra_cxx_flags, Cc};
 pub use c_build::{build_native_dynamic_lib, build_native_static_lib, build_native_static_lib_optimized, build_native_static_lib_cxx};
+pub use cargo::cargo;
 pub use clang::{clang, Clang};
 pub use htmldocck::htmldocck;
 pub use llvm::{
@@ -58,7 +59,7 @@ pub use llvm::{
     LlvmProfdata, LlvmReadobj,
 };
 pub use python::python_command;
-pub use rustc::{aux_build, bare_rustc, rustc, Rustc};
+pub use rustc::{aux_build, bare_rustc, rustc, rustc_path, Rustc};
 pub use rustdoc::{bare_rustdoc, rustdoc, Rustdoc};
 
 /// [`diff`][mod@diff] is implemented in terms of the [similar] library.
@@ -98,3 +99,4 @@ pub use assertion_helpers::{
 pub use string::{
     count_regex_matches_in_files_with_extension, invalid_utf8_contains, invalid_utf8_not_contains,
 };
+use crate::external_deps::cargo;
diff --git a/tests/codegen/naked-asan.rs b/tests/codegen/naked-asan.rs
index a45b95780f793..ac36018eed3a1 100644
--- a/tests/codegen/naked-asan.rs
+++ b/tests/codegen/naked-asan.rs
@@ -20,3 +20,4 @@ pub extern "x86-interrupt" fn page_fault_handler(_: u64, _: u64) {
 
 // CHECK: #[[ATTRS]] =
 // CHECK-NOT: sanitize_address
+// CHECK: !llvm.module.flags
diff --git a/tests/pretty/tests-are-sorted.pp b/tests/pretty/tests-are-sorted.pp
index 816cd5a5c072c..a4b15dde4530e 100644
--- a/tests/pretty/tests-are-sorted.pp
+++ b/tests/pretty/tests-are-sorted.pp
@@ -83,6 +83,7 @@
 fn a_test() {}
 #[rustc_main]
 #[coverage(off)]
+#[allow(missing_docs)]
 pub fn main() -> () {
     extern crate test;
     test::test_main_static(&[&a_test, &m_test, &z_test])
diff --git a/tests/run-make/rustc-crates-on-stable/rmake.rs b/tests/run-make/rustc-crates-on-stable/rmake.rs
new file mode 100644
index 0000000000000..81cc775c91997
--- /dev/null
+++ b/tests/run-make/rustc-crates-on-stable/rmake.rs
@@ -0,0 +1,36 @@
+//! Checks if selected rustc crates can be compiled on the stable channel (or a "simulation" of it).
+//! These crates are designed to be used by downstream users.
+
+use run_make_support::{cargo, rustc_path, source_root};
+
+fn main() {
+    // Use the stage0 beta cargo for the compilation (it shouldn't really matter which cargo we use)
+    cargo()
+        // Ensure `proc-macro2`'s nightly detection is disabled
+        .env("RUSTC_STAGE", "0")
+        .env("RUSTC", rustc_path())
+        // We want to disallow all nightly features to simulate a stable build
+        .env("RUSTFLAGS", "-Zallow-features=")
+        .arg("build")
+        .arg("--manifest-path")
+        .arg(source_root().join("Cargo.toml"))
+        .args(&[
+            // Avoid depending on transitive rustc crates
+            "--no-default-features",
+            // Emit artifacts in this temporary directory, not in the source_root's `target` folder
+            "--target-dir",
+            "target",
+        ])
+        // Check that these crates can be compiled on "stable"
+        .args(&[
+            "-p",
+            "rustc_type_ir",
+            "-p",
+            "rustc_next_trait_solver",
+            "-p",
+            "rustc_pattern_analysis",
+            "-p",
+            "rustc_lexer",
+        ])
+        .run();
+}
diff --git a/tests/ui/asm/naked-functions-inline.rs b/tests/ui/asm/naked-functions-inline.rs
index cfb38f2e73848..74049e8ecbc7c 100644
--- a/tests/ui/asm/naked-functions-inline.rs
+++ b/tests/ui/asm/naked-functions-inline.rs
@@ -2,37 +2,37 @@
 #![feature(naked_functions)]
 #![crate_type = "lib"]
 
-use std::arch::asm;
+use std::arch::naked_asm;
 
 #[naked]
 pub unsafe extern "C" fn inline_none() {
-    asm!("", options(noreturn));
+    naked_asm!("");
 }
 
 #[naked]
 #[inline]
 //~^ ERROR [E0736]
 pub unsafe extern "C" fn inline_hint() {
-    asm!("", options(noreturn));
+    naked_asm!("");
 }
 
 #[naked]
 #[inline(always)]
 //~^ ERROR [E0736]
 pub unsafe extern "C" fn inline_always() {
-    asm!("", options(noreturn));
+    naked_asm!("");
 }
 
 #[naked]
 #[inline(never)]
 //~^ ERROR [E0736]
 pub unsafe extern "C" fn inline_never() {
-    asm!("", options(noreturn));
+    naked_asm!("");
 }
 
 #[naked]
 #[cfg_attr(all(), inline(never))]
 //~^ ERROR [E0736]
 pub unsafe extern "C" fn conditional_inline_never() {
-    asm!("", options(noreturn));
+    naked_asm!("");
 }
diff --git a/tests/ui/lint/lint-missing-doc-crate.rs b/tests/ui/lint/lint-missing-doc-crate.rs
new file mode 100644
index 0000000000000..afda73cbc603a
--- /dev/null
+++ b/tests/ui/lint/lint-missing-doc-crate.rs
@@ -0,0 +1,4 @@
+// This test checks that we lint on the crate root when it is missing documentation.
+//
+//@ compile-flags: -Dmissing-docs --crate-type=lib
+//~ ERROR missing documentation for the crate
diff --git a/tests/ui/lint/lint-missing-doc-crate.stderr b/tests/ui/lint/lint-missing-doc-crate.stderr
new file mode 100644
index 0000000000000..8efd3a17263fe
--- /dev/null
+++ b/tests/ui/lint/lint-missing-doc-crate.stderr
@@ -0,0 +1,10 @@
+error: missing documentation for the crate
+  --> $DIR/lint-missing-doc-crate.rs:4:47
+   |
+LL |
+   |                                              ^
+   |
+   = note: requested on the command line with `-D missing-docs`
+
+error: aborting due to 1 previous error
+
diff --git a/tests/ui/lint/lint-missing-doc-expect.rs b/tests/ui/lint/lint-missing-doc-expect.rs
new file mode 100644
index 0000000000000..991f65003dc26
--- /dev/null
+++ b/tests/ui/lint/lint-missing-doc-expect.rs
@@ -0,0 +1,13 @@
+// Make sure that `#[expect(missing_docs)]` is always correctly fulfilled.
+
+//@ check-pass
+//@ revisions: lib bin test
+//@ [lib]compile-flags: --crate-type lib
+//@ [bin]compile-flags: --crate-type bin
+//@ [test]compile-flags: --test
+
+#[expect(missing_docs)]
+pub fn foo() {}
+
+#[cfg(bin)]
+fn main() {}
diff --git a/tests/ui/lint/lint-missing-doc-test.rs b/tests/ui/lint/lint-missing-doc-test.rs
new file mode 100644
index 0000000000000..93d4e4a44e928
--- /dev/null
+++ b/tests/ui/lint/lint-missing-doc-test.rs
@@ -0,0 +1,5 @@
+//! This test checks that denying the missing_docs lint does not trigger
+//! on the generated test harness.
+
+//@ check-pass
+//@ compile-flags: --test -Dmissing_docs
diff --git a/tests/ui/traits/next-solver/global-cache-and-parallel-frontend.rs b/tests/ui/traits/next-solver/global-cache-and-parallel-frontend.rs
new file mode 100644
index 0000000000000..2b4f7ba9fa29e
--- /dev/null
+++ b/tests/ui/traits/next-solver/global-cache-and-parallel-frontend.rs
@@ -0,0 +1,27 @@
+//@ compile-flags: -Zthreads=16
+
+// original issue: https://github.com/rust-lang/rust/issues/129112
+// Previously, the "next" solver asserted that each successful solution is only obtained once.
+// This test exhibits a repro that, with next-solver + -Zthreads, triggered that old assert.
+// In the presence of multithreaded solving, it's possible to concurrently evaluate things twice,
+// which leads to replacing already-solved solutions in the global solution cache!
+// We assume this is fine as long as we check that both evaluations produce the same result.
+
+// This test fails only nondeterministically, but that is okay: CI reruns it many times, so a
+// regression should almost always be caught before anything is merged. Other multithreaded tests
+// already face the same difficulty. If the flakiness margin ever needs to be tightened, the right
+// fix is to improve compiletest.
+
+#[derive(Clone, Eq)] //~ ERROR [E0277]
+pub struct Struct<T>(T);
+
+impl<T: Clone, U> PartialEq<U> for Struct<T>
+where
+    U: Into<Struct<T>> + Clone
+{
+    fn eq(&self, _other: &U) -> bool {
+        todo!()
+    }
+}
+
+fn main() {}
diff --git a/tests/ui/traits/next-solver/global-cache-and-parallel-frontend.stderr b/tests/ui/traits/next-solver/global-cache-and-parallel-frontend.stderr
new file mode 100644
index 0000000000000..65e7dd2ab34bc
--- /dev/null
+++ b/tests/ui/traits/next-solver/global-cache-and-parallel-frontend.stderr
@@ -0,0 +1,24 @@
+error[E0277]: the trait bound `T: Clone` is not satisfied
+  --> $DIR/global-cache-and-parallel-frontend.rs:15:17
+   |
+LL | #[derive(Clone, Eq)]
+   |                 ^^ the trait `Clone` is not implemented for `T`, which is required by `Struct<T>: PartialEq`
+   |
+note: required for `Struct<T>` to implement `PartialEq`
+  --> $DIR/global-cache-and-parallel-frontend.rs:18:19
+   |
+LL | impl<T: Clone, U> PartialEq<U> for Struct<T>
+   |         -----     ^^^^^^^^^^^^     ^^^^^^^^^
+   |         |
+   |         unsatisfied trait bound introduced here
+note: required by a bound in `Eq`
+  --> $SRC_DIR/core/src/cmp.rs:LL:COL
+   = note: this error originates in the derive macro `Eq` (in Nightly builds, run with -Z macro-backtrace for more info)
+help: consider restricting type parameter `T`
+   |
+LL | pub struct Struct<T: std::clone::Clone>(T);
+   |                    +++++++++++++++++++
+
+error: aborting due to 1 previous error
+
+For more information about this error, try `rustc --explain E0277`.