diff --git a/crates/wasmtime/src/runtime/component/component.rs b/crates/wasmtime/src/runtime/component/component.rs index e0a7afb3a626..711cf4450838 100644 --- a/crates/wasmtime/src/runtime/component/component.rs +++ b/crates/wasmtime/src/runtime/component/component.rs @@ -516,7 +516,7 @@ impl Component { .info .resource_drop_wasm_to_array_trampoline .as_ref() - .map(|i| self.func(i).cast()); + .map(|i| self.func(i).cast().into()); VMFuncRef { wasm_call, ..*dtor.func_ref() diff --git a/crates/wasmtime/src/runtime/component/func.rs b/crates/wasmtime/src/runtime/component/func.rs index 1bfcefff8e2b..65687002cb4a 100644 --- a/crates/wasmtime/src/runtime/component/func.rs +++ b/crates/wasmtime/src/runtime/component/func.rs @@ -470,10 +470,11 @@ impl Func { crate::Func::call_unchecked_raw( store, export.func_ref, - core::ptr::slice_from_raw_parts_mut( + NonNull::new(core::ptr::slice_from_raw_parts_mut( space.as_mut_ptr().cast(), mem::size_of_val(space) / mem::size_of::(), - ), + )) + .unwrap(), )?; // Note that `.assume_init_ref()` here is unsafe but we're relying @@ -622,7 +623,8 @@ impl Func { crate::Func::call_unchecked_raw( &mut store, func.func_ref, - core::ptr::slice_from_raw_parts(&post_return_arg, 1).cast_mut(), + NonNull::new(core::ptr::slice_from_raw_parts(&post_return_arg, 1).cast_mut()) + .unwrap(), )?; } diff --git a/crates/wasmtime/src/runtime/component/func/host.rs b/crates/wasmtime/src/runtime/component/func/host.rs index 3ab6581959b8..333d98ddf57c 100644 --- a/crates/wasmtime/src/runtime/component/func/host.rs +++ b/crates/wasmtime/src/runtime/component/func/host.rs @@ -6,7 +6,7 @@ use crate::prelude::*; use crate::runtime::vm::component::{ ComponentInstance, InstanceFlags, VMComponentContext, VMLowering, VMLoweringCallee, }; -use crate::runtime::vm::{VMFuncRef, VMMemoryDefinition, VMOpaqueContext}; +use crate::runtime::vm::{VMFuncRef, VMGlobalDefinition, VMMemoryDefinition, VMOpaqueContext}; use crate::{AsContextMut, CallHook, StoreContextMut, ValRaw}; use alloc::sync::Arc; use core::any::Any; @@ -39,14 +39,14 @@ impl HostFunc { } extern "C" fn entrypoint( - cx: *mut VMOpaqueContext, - data: *mut u8, + cx: NonNull, + data: NonNull, ty: u32, - flags: *mut u8, + flags: NonNull, memory: *mut VMMemoryDefinition, realloc: *mut VMFuncRef, string_encoding: u8, - storage: *mut MaybeUninit, + storage: NonNull>, storage_len: usize, ) -> bool where @@ -54,7 +54,7 @@ impl HostFunc { P: ComponentNamedList + Lift + 'static, R: ComponentNamedList + Lower + 'static, { - let data = data as *const F; + let data = data.as_ptr() as *const F; unsafe { call_host_and_handle_result::(cx, |instance, types, store| { call_host::<_, _, _, _>( @@ -66,7 +66,7 @@ impl HostFunc { memory, realloc, StringEncoding::from_u8(string_encoding).unwrap(), - core::slice::from_raw_parts_mut(storage, storage_len), + NonNull::slice_from_raw_parts(storage, storage_len).as_mut(), |store, args| (*data)(store, args), ) }) @@ -290,7 +290,7 @@ fn validate_inbounds(memory: &[u8], ptr: &ValRaw) -> Result( - cx: *mut VMOpaqueContext, + cx: NonNull, func: impl FnOnce( *mut ComponentInstance, &Arc, @@ -298,7 +298,7 @@ unsafe fn call_host_and_handle_result( ) -> Result<()>, ) -> bool { let cx = VMComponentContext::from_opaque(cx); - let instance = (*cx).instance(); + let instance = cx.as_ref().instance(); let types = (*instance).component_types(); let raw_store = (*instance).store(); let mut store = StoreContextMut(&mut *raw_store.cast()); @@ -422,20 +422,20 @@ fn validate_inbounds_dynamic(abi: &CanonicalAbiInfo, memory: &[u8], 
ptr: &ValRaw } extern "C" fn dynamic_entrypoint( - cx: *mut VMOpaqueContext, - data: *mut u8, + cx: NonNull, + data: NonNull, ty: u32, - flags: *mut u8, + flags: NonNull, memory: *mut VMMemoryDefinition, realloc: *mut VMFuncRef, string_encoding: u8, - storage: *mut MaybeUninit, + storage: NonNull>, storage_len: usize, ) -> bool where F: Fn(StoreContextMut<'_, T>, &[Val], &mut [Val]) -> Result<()> + Send + Sync + 'static, { - let data = data as *const F; + let data = data.as_ptr() as *const F; unsafe { call_host_and_handle_result(cx, |instance, types, store| { call_host_dynamic::( @@ -447,7 +447,7 @@ where memory, realloc, StringEncoding::from_u8(string_encoding).unwrap(), - core::slice::from_raw_parts_mut(storage, storage_len), + NonNull::slice_from_raw_parts(storage, storage_len).as_mut(), |store, params, results| (*data)(store, params, results), ) }) diff --git a/crates/wasmtime/src/runtime/component/func/options.rs b/crates/wasmtime/src/runtime/component/func/options.rs index ff58df0d5277..20bfa88709f5 100644 --- a/crates/wasmtime/src/runtime/component/func/options.rs +++ b/crates/wasmtime/src/runtime/component/func/options.rs @@ -138,7 +138,7 @@ impl Options { // is an optional configuration in canonical ABI options. unsafe { let memory = self.memory.unwrap().as_ref(); - core::slice::from_raw_parts(memory.base, memory.current_length()) + core::slice::from_raw_parts(memory.base.as_ptr(), memory.current_length()) } } @@ -149,7 +149,7 @@ impl Options { // See comments in `memory` about the unsafety unsafe { let memory = self.memory.unwrap().as_ref(); - core::slice::from_raw_parts_mut(memory.base, memory.current_length()) + core::slice::from_raw_parts_mut(memory.base.as_ptr(), memory.current_length()) } } diff --git a/crates/wasmtime/src/runtime/component/instance.rs b/crates/wasmtime/src/runtime/component/instance.rs index e4a17d8b8ce0..5584cf68b753 100644 --- a/crates/wasmtime/src/runtime/component/instance.rs +++ b/crates/wasmtime/src/runtime/component/instance.rs @@ -12,7 +12,7 @@ use crate::store::{StoreOpaque, Stored}; use crate::{AsContextMut, Engine, Module, StoreContextMut}; use alloc::sync::Arc; use core::marker; -use core::ptr::{self, NonNull}; +use core::ptr::NonNull; use wasmtime_environ::{component::*, EngineOrModuleTypeIndex}; use wasmtime_environ::{EntityIndex, EntityType, Global, PrimaryMap, WasmValType}; @@ -376,7 +376,7 @@ impl InstanceData { CoreDef::InstanceFlags(idx) => { crate::runtime::vm::Export::Global(crate::runtime::vm::ExportGlobal { definition: self.state.instance_flags(*idx).as_raw(), - vmctx: ptr::null_mut(), + vmctx: None, global: Global { wasm_ty: WasmValType::I32, mutability: true, diff --git a/crates/wasmtime/src/runtime/component/resources.rs b/crates/wasmtime/src/runtime/component/resources.rs index ffbafdd2d382..7f2203eb97cb 100644 --- a/crates/wasmtime/src/runtime/component/resources.rs +++ b/crates/wasmtime/src/runtime/component/resources.rs @@ -1034,7 +1034,7 @@ impl ResourceAny { // destructors have al been previously type-checked and are guaranteed // to take one i32 argument and return no results, so the parameters // here should be configured correctly. 
- unsafe { crate::Func::call_unchecked_raw(store, dtor, &mut args) } + unsafe { crate::Func::call_unchecked_raw(store, dtor, NonNull::from(&mut args)) } } fn lower_to_index(&self, cx: &mut LowerContext<'_, U>, ty: InterfaceType) -> Result { diff --git a/crates/wasmtime/src/runtime/externals/global.rs b/crates/wasmtime/src/runtime/externals/global.rs index 785376632bb6..b581c64b23d6 100644 --- a/crates/wasmtime/src/runtime/externals/global.rs +++ b/crates/wasmtime/src/runtime/externals/global.rs @@ -108,7 +108,7 @@ impl Global { unsafe { let store = store.as_context_mut(); let mut store = AutoAssertNoGc::new(store.0); - let definition = &*store[self.0].definition; + let definition = store[self.0].definition.as_ref(); match self._ty(&store).content() { ValType::I32 => Val::from(*definition.as_i32()), ValType::I64 => Val::from(*definition.as_i64()), @@ -181,7 +181,7 @@ impl Global { val.ensure_matches_ty(&store, global_ty.content()) .context("type mismatch: attempt to set global to value of wrong type")?; unsafe { - let definition = &mut *store[self.0].definition; + let definition = store[self.0].definition.as_mut(); match val { Val::I32(i) => *definition.as_i32_mut() = i, Val::I64(i) => *definition.as_i64_mut() = i, @@ -222,7 +222,7 @@ impl Global { return; } - if let Some(gc_ref) = unsafe { (*store[self.0].definition).as_gc_ref() } { + if let Some(gc_ref) = unsafe { store[self.0].definition.as_ref().as_gc_ref() } { let gc_ref = NonNull::from(gc_ref); let gc_ref = SendSyncPtr::new(gc_ref); unsafe { @@ -240,9 +240,10 @@ impl Global { .global .wasm_ty .canonicalize_for_runtime_usage(&mut |module_index| { - crate::runtime::vm::Instance::from_vmctx(wasmtime_export.vmctx, |instance| { - instance.engine_type_index(module_index) - }) + crate::runtime::vm::Instance::from_vmctx( + wasmtime_export.vmctx.unwrap(), + |instance| instance.engine_type_index(module_index), + ) }); Global(store.store_data_mut().insert(wasmtime_export)) @@ -254,7 +255,7 @@ impl Global { pub(crate) fn vmimport(&self, store: &StoreOpaque) -> crate::runtime::vm::VMGlobalImport { crate::runtime::vm::VMGlobalImport { - from: store[self.0].definition, + from: store[self.0].definition.into(), } } @@ -264,7 +265,7 @@ impl Global { /// `StoreData` multiple times and becomes multiple `wasmtime::Global`s, /// this hash key will be consistent across all of these globals. pub(crate) fn hash_key(&self, store: &StoreOpaque) -> impl core::hash::Hash + Eq + use<> { - store[self.0].definition as usize + store[self.0].definition.as_ptr() as usize } } diff --git a/crates/wasmtime/src/runtime/externals/table.rs b/crates/wasmtime/src/runtime/externals/table.rs index 3da105fe4cf8..fd404859e7de 100644 --- a/crates/wasmtime/src/runtime/externals/table.rs +++ b/crates/wasmtime/src/runtime/externals/table.rs @@ -141,7 +141,7 @@ impl Table { vmctx, definition, .. } = store[self.0]; crate::runtime::vm::Instance::from_vmctx(vmctx, |handle| { - let idx = handle.table_index(&*definition); + let idx = handle.table_index(definition.as_ref()); handle.get_defined_table_with_lazy_init(idx, lazy_init_range) }) } @@ -229,7 +229,7 @@ impl Table { pub(crate) fn internal_size(&self, store: &StoreOpaque) -> u64 { // unwrap here should be ok because the runtime should always guarantee // that we can fit the number of elements in a 64-bit integer. 
- unsafe { u64::try_from((*store[self.0].definition).current_elements).unwrap() } + unsafe { u64::try_from(store[self.0].definition.as_ref().current_elements).unwrap() } } /// Grows the size of this table by `delta` more elements, initialization @@ -262,7 +262,7 @@ impl Table { match (*table).grow(delta, init, store)? { Some(size) => { let vm = (*table).vmtable(); - *store[self.0].definition = vm; + store[self.0].definition.write(vm); // unwrap here should be ok because the runtime should always guarantee // that we can fit the table size in a 64-bit integer. Ok(u64::try_from(size).unwrap()) @@ -421,8 +421,8 @@ impl Table { pub(crate) fn vmimport(&self, store: &StoreOpaque) -> crate::runtime::vm::VMTableImport { let export = &store[self.0]; crate::runtime::vm::VMTableImport { - from: export.definition, - vmctx: export.vmctx, + from: export.definition.into(), + vmctx: export.vmctx.into(), } } @@ -433,7 +433,7 @@ impl Table { /// this hash key will be consistent across all of these tables. #[allow(dead_code)] // Not used yet, but added for consistency. pub(crate) fn hash_key(&self, store: &StoreOpaque) -> impl core::hash::Hash + Eq + use<'_> { - store[self.0].definition as usize + store[self.0].definition.as_ptr() as usize } } diff --git a/crates/wasmtime/src/runtime/func.rs b/crates/wasmtime/src/runtime/func.rs index edf7264641a3..fe7e4129a42b 100644 --- a/crates/wasmtime/src/runtime/func.rs +++ b/crates/wasmtime/src/runtime/func.rs @@ -1060,13 +1060,14 @@ impl Func { let mut store = store.as_context_mut(); let data = &store.0.store_data()[self.0]; let func_ref = data.export().func_ref; + let params_and_returns = NonNull::new(params_and_returns).unwrap_or(NonNull::from(&mut [])); Self::call_unchecked_raw(&mut store, func_ref, params_and_returns) } pub(crate) unsafe fn call_unchecked_raw( store: &mut StoreContextMut<'_, T>, func_ref: NonNull, - params_and_returns: *mut [ValRaw], + params_and_returns: NonNull<[ValRaw]>, ) -> Result<()> { invoke_wasm_and_catch_traps(store, |caller, vm| { func_ref.as_ref().array_call( @@ -1320,18 +1321,18 @@ impl Func { }; VMFunctionImport { wasm_call: if let Some(wasm_call) = f.as_ref().wasm_call { - wasm_call + wasm_call.into() } else { // Assert that this is a array-call function, since those // are the only ones that could be missing a `wasm_call` // trampoline. - let _ = VMArrayCallHostFuncContext::from_opaque(f.as_ref().vmctx); + let _ = VMArrayCallHostFuncContext::from_opaque(f.as_ref().vmctx.as_non_null()); let sig = self.type_index(store.store_data()); module.wasm_to_array_trampoline(sig).expect( "if the wasm is importing a function of a given type, it must have the \ type's trampoline", - ) + ).into() }, array_call: f.as_ref().array_call, vmctx: f.as_ref().vmctx, @@ -1594,7 +1595,7 @@ impl Func { /// can pass to the called wasm function, if desired. pub(crate) fn invoke_wasm_and_catch_traps( store: &mut StoreContextMut<'_, T>, - closure: impl FnMut(*mut VMContext, Option>) -> bool, + closure: impl FnMut(NonNull, Option>) -> bool, ) -> Result<()> { unsafe { let exit = enter_wasm(store); @@ -2026,7 +2027,7 @@ pub struct Caller<'a, T> { } impl Caller<'_, T> { - unsafe fn with(caller: *mut VMContext, f: F) -> R + unsafe fn with(caller: NonNull, f: F) -> R where // The closure must be valid for any `Caller` it is given; it doesn't // get to choose the `Caller`'s lifetime. @@ -2034,7 +2035,6 @@ impl Caller<'_, T> { // And the return value must not borrow from the caller/store. 
R: 'static, { - debug_assert!(!caller.is_null()); crate::runtime::vm::InstanceAndStore::from_vmctx(caller, |pair| { let (instance, mut store) = pair.unpack_context_mut::(); @@ -2294,9 +2294,9 @@ impl HostContext { } unsafe extern "C" fn array_call_trampoline( - callee_vmctx: *mut VMOpaqueContext, - caller_vmctx: *mut VMOpaqueContext, - args: *mut ValRaw, + callee_vmctx: NonNull, + caller_vmctx: NonNull, + args: NonNull, args_len: usize, ) -> bool where @@ -2311,10 +2311,10 @@ impl HostContext { // should be part of this closure, and the long-jmp-ing // happens after the closure in handling the result. let run = move |mut caller: Caller<'_, T>| { - let args = - core::slice::from_raw_parts_mut(args.cast::>(), args_len); + let mut args = + NonNull::slice_from_raw_parts(args.cast::>(), args_len); let vmctx = VMArrayCallHostFuncContext::from_opaque(callee_vmctx); - let state = (*vmctx).host_state(); + let state = vmctx.as_ref().host_state(); // Double-check ourselves in debug mode, but we control // the `Any` here so an unsafe downcast should also @@ -2333,7 +2333,7 @@ impl HostContext { } else { unsafe { AutoAssertNoGc::disabled(caller.store.0) } }; - let params = P::load(&mut store, args); + let params = P::load(&mut store, args.as_mut()); let _ = &mut store; drop(store); @@ -2352,7 +2352,7 @@ impl HostContext { } else { unsafe { AutoAssertNoGc::disabled(caller.store.0) } }; - let ret = ret.store(&mut store, args)?; + let ret = ret.store(&mut store, args.as_mut())?; Ok(ret) } }; @@ -2534,7 +2534,7 @@ impl HostFunc { pub(crate) fn func_ref(&self) -> &VMFuncRef { match &self.ctx { - HostContext::Array(ctx) => unsafe { (*ctx.get()).func_ref() }, + HostContext::Array(ctx) => unsafe { ctx.get().as_ref().func_ref() }, } } diff --git a/crates/wasmtime/src/runtime/func/typed.rs b/crates/wasmtime/src/runtime/func/typed.rs index 5c31182e6b2e..241b34c2eeb7 100644 --- a/crates/wasmtime/src/runtime/func/typed.rs +++ b/crates/wasmtime/src/runtime/func/typed.rs @@ -217,6 +217,7 @@ where let storage: *mut Storage<_, _> = storage; let storage = storage.cast::(); let storage = core::ptr::slice_from_raw_parts_mut(storage, storage_len); + let storage = NonNull::new(storage).unwrap(); func_ref .as_ref() .array_call(vm, VMOpaqueContext::from_vmcontext(caller), storage) diff --git a/crates/wasmtime/src/runtime/instance.rs b/crates/wasmtime/src/runtime/instance.rs index 5e3117fe4383..0a9247797c00 100644 --- a/crates/wasmtime/src/runtime/instance.rs +++ b/crates/wasmtime/src/runtime/instance.rs @@ -365,7 +365,7 @@ impl Instance { f.func_ref.as_ref().array_call( vm, VMOpaqueContext::from_vmcontext(caller_vmctx), - &mut [], + NonNull::from(&mut []), ) })?; } @@ -712,18 +712,20 @@ impl OwnedImports { }); } crate::runtime::vm::Export::Global(g) => { - self.globals.push(VMGlobalImport { from: g.definition }); + self.globals.push(VMGlobalImport { + from: g.definition.into(), + }); } crate::runtime::vm::Export::Table(t) => { self.tables.push(VMTableImport { - from: t.definition, - vmctx: t.vmctx, + from: t.definition.into(), + vmctx: t.vmctx.into(), }); } crate::runtime::vm::Export::Memory(m) => { self.memories.push(VMMemoryImport { - from: m.definition, - vmctx: m.vmctx, + from: m.definition.into(), + vmctx: m.vmctx.into(), index: m.index, }); } @@ -818,7 +820,9 @@ impl InstancePre { // Wasm-to-native trampoline. 
debug_assert!(matches!(f.host_ctx(), crate::HostContext::Array(_))); func_refs.push(VMFuncRef { - wasm_call: module.wasm_to_array_trampoline(f.sig_index()), + wasm_call: module + .wasm_to_array_trampoline(f.sig_index()) + .map(|f| f.into()), ..*f.func_ref() }); } diff --git a/crates/wasmtime/src/runtime/memory.rs b/crates/wasmtime/src/runtime/memory.rs index 8ef0b1a51a11..d557e749267c 100644 --- a/crates/wasmtime/src/runtime/memory.rs +++ b/crates/wasmtime/src/runtime/memory.rs @@ -365,9 +365,9 @@ impl Memory { pub fn data<'a, T: 'a>(&self, store: impl Into>) -> &'a [u8] { unsafe { let store = store.into(); - let definition = &*store[self.0].definition; + let definition = store[self.0].definition.as_ref(); debug_assert!(!self.ty(store).is_shared()); - slice::from_raw_parts(definition.base, definition.current_length()) + slice::from_raw_parts(definition.base.as_ptr(), definition.current_length()) } } @@ -382,9 +382,9 @@ impl Memory { pub fn data_mut<'a, T: 'a>(&self, store: impl Into>) -> &'a mut [u8] { unsafe { let store = store.into(); - let definition = &*store[self.0].definition; + let definition = store[self.0].definition.as_ref(); debug_assert!(!self.ty(store).is_shared()); - slice::from_raw_parts_mut(definition.base, definition.current_length()) + slice::from_raw_parts_mut(definition.base.as_ptr(), definition.current_length()) } } @@ -431,7 +431,7 @@ impl Memory { /// /// Panics if this memory doesn't belong to `store`. pub fn data_ptr(&self, store: impl AsContext) -> *mut u8 { - unsafe { (*store.as_context()[self.0].definition).base } + unsafe { store.as_context()[self.0].definition.as_ref().base.as_ptr() } } /// Returns the byte length of this memory. @@ -459,7 +459,7 @@ impl Memory { } pub(crate) fn internal_data_size(&self, store: &StoreOpaque) -> usize { - unsafe { (*store[self.0].definition).current_length() } + unsafe { store[self.0].definition.as_ref().current_length() } } /// Returns the size, in units of pages, of this Wasm memory. @@ -588,7 +588,7 @@ impl Memory { match (*mem).grow(delta, Some(store))? { Some(size) => { let vm = (*mem).vmmemory(); - *store[self.0].definition = vm; + store[self.0].definition.write(vm); let page_size = (*mem).page_size(); Ok(u64::try_from(size).unwrap() / page_size) } @@ -644,8 +644,8 @@ impl Memory { pub(crate) fn vmimport(&self, store: &StoreOpaque) -> crate::runtime::vm::VMMemoryImport { let export = &store[self.0]; crate::runtime::vm::VMMemoryImport { - from: export.definition, - vmctx: export.vmctx, + from: export.definition.into(), + vmctx: export.vmctx.into(), index: export.index, } } @@ -660,7 +660,7 @@ impl Memory { /// `StoreData` multiple times and becomes multiple `wasmtime::Memory`s, /// this hash key will be consistent across all of these memories. pub(crate) fn hash_key(&self, store: &StoreOpaque) -> impl core::hash::Hash + Eq + use<> { - store[self.0].definition as usize + store[self.0].definition.as_ptr() as usize } } @@ -877,8 +877,8 @@ impl SharedMemory { /// currently be done unsafely. 
pub fn data(&self) -> &[UnsafeCell] { unsafe { - let definition = &*self.vm.vmmemory_ptr(); - slice::from_raw_parts(definition.base.cast(), definition.current_length()) + let definition = self.vm.vmmemory_ptr().as_ref(); + slice::from_raw_parts(definition.base.as_ptr().cast(), definition.current_length()) } } @@ -1000,8 +1000,8 @@ impl SharedMemory { pub(crate) fn vmimport(&self, store: &mut StoreOpaque) -> crate::runtime::vm::VMMemoryImport { let export_memory = generate_memory_export(store, &self.ty(), Some(&self.vm)).unwrap(); VMMemoryImport { - from: export_memory.definition, - vmctx: export_memory.vmctx, + from: export_memory.definition.into(), + vmctx: export_memory.vmctx.into(), index: export_memory.index, } } diff --git a/crates/wasmtime/src/runtime/store.rs b/crates/wasmtime/src/runtime/store.rs index 28a88054cd63..edb9260dc2fb 100644 --- a/crates/wasmtime/src/runtime/store.rs +++ b/crates/wasmtime/src/runtime/store.rs @@ -1520,9 +1520,9 @@ impl StoreOpaque { // First enumerate all the host-created globals. for global in temp.host_globals.iter() { let export = ExportGlobal { - definition: &mut (*global.get()).global as *mut _, - vmctx: core::ptr::null_mut(), - global: (*global.get()).ty.to_wasm_type(), + definition: NonNull::from(&mut global.get().as_mut().global), + vmctx: None, + global: global.get().as_ref().ty.to_wasm_type(), }; let global = Global::from_wasmtime_global(export, temp.store); f(temp.store, global); @@ -1924,12 +1924,12 @@ impl StoreOpaque { } #[inline] - pub fn vmruntime_limits(&self) -> *mut VMRuntimeLimits { - &self.runtime_limits as *const VMRuntimeLimits as *mut VMRuntimeLimits + pub fn vmruntime_limits(&self) -> NonNull { + NonNull::from(&self.runtime_limits) } #[inline] - pub fn default_caller(&self) -> *mut VMContext { + pub fn default_caller(&self) -> NonNull { self.default_caller.vmctx() } @@ -2753,7 +2753,7 @@ impl StoreInner { // Also, note that when this update is performed while Wasm is // on the stack, the Wasm will reload the new value once we // return into it. - let epoch_deadline = unsafe { (*self.vmruntime_limits()).epoch_deadline.get_mut() }; + let epoch_deadline = unsafe { self.vmruntime_limits().as_mut().epoch_deadline.get_mut() }; *epoch_deadline = self.engine().current_epoch() + delta; } @@ -2785,7 +2785,7 @@ impl StoreInner { // Safety: this is safe because, as above, it is only invoked // from within `new_epoch` which is called from guest Wasm // code, which will have an exclusive borrow on the Store. - let epoch_deadline = unsafe { (*self.vmruntime_limits()).epoch_deadline.get_mut() }; + let epoch_deadline = unsafe { self.vmruntime_limits().as_mut().epoch_deadline.get_mut() }; *epoch_deadline } } diff --git a/crates/wasmtime/src/runtime/store/func_refs.rs b/crates/wasmtime/src/runtime/store/func_refs.rs index 9b195e42af7b..2d677213ed28 100644 --- a/crates/wasmtime/src/runtime/store/func_refs.rs +++ b/crates/wasmtime/src/runtime/store/func_refs.rs @@ -57,7 +57,7 @@ impl FuncRefs { debug_assert!(func_ref.wasm_call.is_none()); // Debug assert that the vmctx is a `VMArrayCallHostFuncContext` as // that is the only kind that can have holes. - let _ = unsafe { VMArrayCallHostFuncContext::from_opaque(func_ref.vmctx) }; + let _ = unsafe { VMArrayCallHostFuncContext::from_opaque(func_ref.vmctx.as_non_null()) }; let func_ref = self.bump.alloc(func_ref); let unpatched = SendSyncPtr::from(func_ref); @@ -75,9 +75,11 @@ impl FuncRefs { // Debug assert that the vmctx is a `VMArrayCallHostFuncContext` as // that is the only kind that can have holes. 
- let _ = VMArrayCallHostFuncContext::from_opaque(func_ref.vmctx); + let _ = VMArrayCallHostFuncContext::from_opaque(func_ref.vmctx.as_non_null()); - func_ref.wasm_call = modules.wasm_to_array_trampoline(func_ref.type_index); + func_ref.wasm_call = modules + .wasm_to_array_trampoline(func_ref.type_index) + .map(|f| f.into()); func_ref.wasm_call.is_none() } }); diff --git a/crates/wasmtime/src/runtime/trampoline/func.rs b/crates/wasmtime/src/runtime/trampoline/func.rs index 60f40745e34a..6da71d14e0f9 100644 --- a/crates/wasmtime/src/runtime/trampoline/func.rs +++ b/crates/wasmtime/src/runtime/trampoline/func.rs @@ -4,6 +4,7 @@ use crate::prelude::*; use crate::runtime::vm::{StoreBox, VMArrayCallHostFuncContext, VMContext, VMOpaqueContext}; use crate::type_registry::RegisteredType; use crate::{FuncType, ValRaw}; +use core::ptr::NonNull; struct TrampolineState { func: F, @@ -21,13 +22,13 @@ struct TrampolineState { /// /// Also shepherds panics and traps across Wasm. unsafe extern "C" fn array_call_shim( - vmctx: *mut VMOpaqueContext, - caller_vmctx: *mut VMOpaqueContext, - values_vec: *mut ValRaw, + vmctx: NonNull, + caller_vmctx: NonNull, + values_vec: NonNull, values_vec_len: usize, ) -> bool where - F: Fn(*mut VMContext, &mut [ValRaw]) -> Result<()> + 'static, + F: Fn(NonNull, &mut [ValRaw]) -> Result<()> + 'static, { // Be sure to catch Rust panics to manually shepherd them across the wasm // boundary, and then otherwise delegate as normal. @@ -36,11 +37,11 @@ where // Double-check ourselves in debug mode, but we control // the `Any` here so an unsafe downcast should also // work. - let state = (*vmctx).host_state(); + let state = vmctx.as_ref().host_state(); debug_assert!(state.is::>()); let state = &*(state as *const _ as *const TrampolineState); - let values_vec = core::slice::from_raw_parts_mut(values_vec, values_vec_len); - (state.func)(VMContext::from_opaque(caller_vmctx), values_vec) + let mut values_vec = NonNull::slice_from_raw_parts(values_vec, values_vec_len); + (state.func)(VMContext::from_opaque(caller_vmctx), values_vec.as_mut()) }) } @@ -49,7 +50,7 @@ pub fn create_array_call_function( func: F, ) -> Result> where - F: Fn(*mut VMContext, &mut [ValRaw]) -> Result<()> + Send + Sync + 'static, + F: Fn(NonNull, &mut [ValRaw]) -> Result<()> + Send + Sync + 'static, { let array_call = array_call_shim::; diff --git a/crates/wasmtime/src/runtime/trampoline/global.rs b/crates/wasmtime/src/runtime/trampoline/global.rs index d7ecd7b87d4b..cf4076f3435f 100644 --- a/crates/wasmtime/src/runtime/trampoline/global.rs +++ b/crates/wasmtime/src/runtime/trampoline/global.rs @@ -1,7 +1,7 @@ use crate::runtime::vm::{StoreBox, VMGlobalDefinition}; use crate::store::{AutoAssertNoGc, StoreOpaque}; use crate::{GlobalType, Mutability, Result, RootedGcRefImpl, Val}; -use core::ptr; +use core::ptr::{self, NonNull}; #[repr(C)] pub struct VMHostGlobalContext { @@ -28,7 +28,7 @@ pub fn generate_global_export( let mut store = AutoAssertNoGc::new(store); let definition = unsafe { - let global = &mut (*ctx.get()).global; + let global = &mut ctx.get().as_mut().global; match val { Val::I32(x) => *global.as_i32_mut() = x, Val::I64(x) => *global.as_i64_mut() = x, @@ -63,8 +63,8 @@ pub fn generate_global_export( store.host_globals().push(ctx); Ok(crate::runtime::vm::ExportGlobal { - definition, - vmctx: ptr::null_mut(), + definition: NonNull::from(definition), + vmctx: None, global, }) } diff --git a/crates/wasmtime/src/runtime/vm.rs b/crates/wasmtime/src/runtime/vm.rs index 5b0cc20ab1f0..57c0a54269cf 100644 --- 
a/crates/wasmtime/src/runtime/vm.rs +++ b/crates/wasmtime/src/runtime/vm.rs @@ -29,6 +29,7 @@ mod imports; mod instance; mod memory; mod mmap_vec; +mod provenance; mod send_sync_ptr; mod send_sync_unsafe_cell; mod store_box; @@ -77,6 +78,7 @@ pub use crate::runtime::vm::memory::{ }; pub use crate::runtime::vm::mmap_vec::MmapVec; pub use crate::runtime::vm::mpk::MpkEnabled; +pub use crate::runtime::vm::provenance::*; pub use crate::runtime::vm::store_box::*; #[cfg(feature = "std")] pub use crate::runtime::vm::sys::mmap::open_file_for_mmap; diff --git a/crates/wasmtime/src/runtime/vm/component.rs b/crates/wasmtime/src/runtime/vm/component.rs index 869542af3ce2..41ada5769d02 100644 --- a/crates/wasmtime/src/runtime/vm/component.rs +++ b/crates/wasmtime/src/runtime/vm/component.rs @@ -9,7 +9,7 @@ use crate::prelude::*; use crate::runtime::vm::{ SendSyncPtr, VMArrayCallFunction, VMFuncRef, VMGlobalDefinition, VMMemoryDefinition, - VMOpaqueContext, VMStore, VMStoreRawPtr, VMWasmCallFunction, ValRaw, + VMOpaqueContext, VMStore, VMStoreRawPtr, VMWasmCallFunction, ValRaw, VmPtr, }; use alloc::alloc::Layout; use alloc::sync::Arc; @@ -110,14 +110,14 @@ pub struct ComponentInstance { // Needs benchmarking one way or another though to figure out what the best // balance is here. pub type VMLoweringCallee = extern "C" fn( - vmctx: *mut VMOpaqueContext, - data: *mut u8, + vmctx: NonNull, + data: NonNull, ty: u32, - flags: *mut u8, + flags: NonNull, opt_memory: *mut VMMemoryDefinition, opt_realloc: *mut VMFuncRef, string_encoding: u8, - args_and_results: *mut mem::MaybeUninit, + args_and_results: NonNull>, nargs_and_results: usize, ) -> bool; @@ -161,13 +161,13 @@ impl ComponentInstance { /// pointer and it cannot be proven statically that it's safe to get a /// mutable reference at this time to the instance from `vmctx`. pub unsafe fn from_vmctx( - vmctx: *mut VMComponentContext, + vmctx: NonNull, f: impl FnOnce(&mut ComponentInstance) -> R, ) -> R { - let ptr = vmctx + let mut ptr = vmctx .byte_sub(mem::size_of::()) .cast::(); - f(&mut *ptr) + f(ptr.as_mut()) } /// Returns the layout corresponding to what would be an allocation of a @@ -231,19 +231,22 @@ impl ComponentInstance { (*ptr.as_ptr()).initialize_vmctx(); } - fn vmctx(&self) -> *mut VMComponentContext { + fn vmctx(&self) -> NonNull { let addr = &raw const self.vmctx; - Strict::with_addr(self.vmctx_self_reference.as_ptr(), Strict::addr(addr)) + let ret = Strict::with_addr(self.vmctx_self_reference.as_ptr(), Strict::addr(addr)); + NonNull::new(ret).unwrap() } unsafe fn vmctx_plus_offset(&self, offset: u32) -> *const T { self.vmctx() + .as_ptr() .byte_add(usize::try_from(offset).unwrap()) .cast() } unsafe fn vmctx_plus_offset_mut(&mut self, offset: u32) -> *mut T { self.vmctx() + .as_ptr() .byte_add(usize::try_from(offset).unwrap()) .cast() } @@ -329,10 +332,10 @@ impl ComponentInstance { let offset = self.offsets.trampoline_func_ref(idx); let ret = self.vmctx_plus_offset::(offset); debug_assert!( - mem::transmute::>, usize>((*ret).wasm_call) + mem::transmute::>, usize>((*ret).wasm_call) != INVALID_PTR ); - debug_assert!((*ret).vmctx as usize != INVALID_PTR); + debug_assert!((*ret).vmctx.as_ptr() as usize != INVALID_PTR); NonNull::new(ret.cast_mut()).unwrap() } } @@ -345,11 +348,16 @@ impl ComponentInstance { /// /// Note that it should be a property of the component model that the `ptr` /// here is never needed prior to it being configured here in the instance. 
- pub fn set_runtime_memory(&mut self, idx: RuntimeMemoryIndex, ptr: *mut VMMemoryDefinition) { + pub fn set_runtime_memory( + &mut self, + idx: RuntimeMemoryIndex, + ptr: NonNull, + ) { unsafe { - debug_assert!(!ptr.is_null()); - let storage = self.vmctx_plus_offset_mut(self.offsets.runtime_memory(idx)); - debug_assert!(*storage as usize == INVALID_PTR); + let storage = self.vmctx_plus_offset_mut::>( + self.offsets.runtime_memory(idx), + ); + debug_assert!((*storage).as_ptr() as usize == INVALID_PTR); *storage = ptr; } } @@ -403,10 +411,10 @@ impl ComponentInstance { debug_assert!(*self.vmctx_plus_offset::(offset) == INVALID_PTR); let vmctx = VMOpaqueContext::from_vmcomponent(self.vmctx()); *self.vmctx_plus_offset_mut(offset) = VMFuncRef { - wasm_call: Some(wasm_call), - array_call, + wasm_call: Some(wasm_call.into()), + array_call: array_call.into(), type_index, - vmctx, + vmctx: vmctx.into(), }; } } @@ -449,7 +457,7 @@ impl ComponentInstance { let i = RuntimeComponentInstanceIndex::from_u32(i); let mut def = VMGlobalDefinition::new(); *def.as_i32_mut() = FLAG_MAY_ENTER | FLAG_MAY_LEAVE; - *self.instance_flags(i).as_raw() = def; + self.instance_flags(i).as_raw().write(def); } // In debug mode set non-null bad values to all "pointer looking" bits @@ -707,7 +715,11 @@ impl OwnedComponentInstance { } /// See `ComponentInstance::set_runtime_memory` - pub fn set_runtime_memory(&mut self, idx: RuntimeMemoryIndex, ptr: *mut VMMemoryDefinition) { + pub fn set_runtime_memory( + &mut self, + idx: RuntimeMemoryIndex, + ptr: NonNull, + ) { unsafe { self.instance_mut().set_runtime_memory(idx, ptr) } } @@ -780,9 +792,9 @@ impl VMComponentContext { /// Helper function to cast between context types using a debug assertion to /// protect against some mistakes. #[inline] - pub unsafe fn from_opaque(opaque: *mut VMOpaqueContext) -> *mut VMComponentContext { + pub unsafe fn from_opaque(opaque: NonNull) -> NonNull { // See comments in `VMContext::from_opaque` for this debug assert - debug_assert_eq!((*opaque).magic, VMCOMPONENT_MAGIC); + debug_assert_eq!(opaque.as_ref().magic, VMCOMPONENT_MAGIC); opaque.cast() } } @@ -790,7 +802,7 @@ impl VMComponentContext { impl VMOpaqueContext { /// Helper function to clearly indicate the cast desired #[inline] - pub fn from_vmcomponent(ptr: *mut VMComponentContext) -> *mut VMOpaqueContext { + pub fn from_vmcomponent(ptr: NonNull) -> NonNull { ptr.cast() } } @@ -808,55 +820,55 @@ impl InstanceFlags { /// /// This is a raw pointer argument which needs to be valid for the lifetime /// that `InstanceFlags` is used. 
- pub unsafe fn from_raw(ptr: *mut u8) -> InstanceFlags { - InstanceFlags(SendSyncPtr::new(NonNull::new(ptr.cast()).unwrap())) + pub unsafe fn from_raw(ptr: NonNull) -> InstanceFlags { + InstanceFlags(SendSyncPtr::from(ptr)) } #[inline] pub unsafe fn may_leave(&self) -> bool { - *(*self.as_raw()).as_i32() & FLAG_MAY_LEAVE != 0 + *self.as_raw().as_ref().as_i32() & FLAG_MAY_LEAVE != 0 } #[inline] pub unsafe fn set_may_leave(&mut self, val: bool) { if val { - *(*self.as_raw()).as_i32_mut() |= FLAG_MAY_LEAVE; + *self.as_raw().as_mut().as_i32_mut() |= FLAG_MAY_LEAVE; } else { - *(*self.as_raw()).as_i32_mut() &= !FLAG_MAY_LEAVE; + *self.as_raw().as_mut().as_i32_mut() &= !FLAG_MAY_LEAVE; } } #[inline] pub unsafe fn may_enter(&self) -> bool { - *(*self.as_raw()).as_i32() & FLAG_MAY_ENTER != 0 + *self.as_raw().as_ref().as_i32() & FLAG_MAY_ENTER != 0 } #[inline] pub unsafe fn set_may_enter(&mut self, val: bool) { if val { - *(*self.as_raw()).as_i32_mut() |= FLAG_MAY_ENTER; + *self.as_raw().as_mut().as_i32_mut() |= FLAG_MAY_ENTER; } else { - *(*self.as_raw()).as_i32_mut() &= !FLAG_MAY_ENTER; + *self.as_raw().as_mut().as_i32_mut() &= !FLAG_MAY_ENTER; } } #[inline] pub unsafe fn needs_post_return(&self) -> bool { - *(*self.as_raw()).as_i32() & FLAG_NEEDS_POST_RETURN != 0 + *self.as_raw().as_ref().as_i32() & FLAG_NEEDS_POST_RETURN != 0 } #[inline] pub unsafe fn set_needs_post_return(&mut self, val: bool) { if val { - *(*self.as_raw()).as_i32_mut() |= FLAG_NEEDS_POST_RETURN; + *self.as_raw().as_mut().as_i32_mut() |= FLAG_NEEDS_POST_RETURN; } else { - *(*self.as_raw()).as_i32_mut() &= !FLAG_NEEDS_POST_RETURN; + *self.as_raw().as_mut().as_i32_mut() &= !FLAG_NEEDS_POST_RETURN; } } #[inline] - pub fn as_raw(&self) -> *mut VMGlobalDefinition { - self.0.as_ptr() + pub fn as_raw(&self) -> NonNull { + self.0.as_non_null() } } diff --git a/crates/wasmtime/src/runtime/vm/component/libcalls.rs b/crates/wasmtime/src/runtime/vm/component/libcalls.rs index fe907f68cd7b..799462fb97b9 100644 --- a/crates/wasmtime/src/runtime/vm/component/libcalls.rs +++ b/crates/wasmtime/src/runtime/vm/component/libcalls.rs @@ -5,6 +5,7 @@ use crate::runtime::vm::component::{ComponentInstance, VMComponentContext}; use crate::runtime::vm::HostResultHasUnwindSentinel; use core::cell::Cell; use core::convert::Infallible; +use core::ptr::NonNull; use core::slice; use wasmtime_environ::component::TypeResourceTableIndex; @@ -19,7 +20,7 @@ macro_rules! signature { (@ty u32) => (u32); (@ty u64) => (u64); (@ty bool) => (bool); - (@ty vmctx) => (*mut VMComponentContext); + (@ty vmctx) => (NonNull); } /// Defines a `VMComponentBuiltins` structure which contains any builtins such @@ -59,6 +60,7 @@ wasmtime_environ::foreach_builtin_component_function!(define_builtins); #[allow(improper_ctypes_definitions)] mod trampolines { use super::VMComponentContext; + use core::ptr::NonNull; macro_rules! 
shims { ( @@ -486,18 +488,26 @@ fn inflate_latin1_bytes(dst: &mut [u16], latin1_bytes_so_far: usize) -> &mut [u1 return rest; } -unsafe fn resource_new32(vmctx: *mut VMComponentContext, resource: u32, rep: u32) -> Result { +unsafe fn resource_new32( + vmctx: NonNull, + resource: u32, + rep: u32, +) -> Result { let resource = TypeResourceTableIndex::from_u32(resource); ComponentInstance::from_vmctx(vmctx, |instance| instance.resource_new32(resource, rep)) } -unsafe fn resource_rep32(vmctx: *mut VMComponentContext, resource: u32, idx: u32) -> Result { +unsafe fn resource_rep32( + vmctx: NonNull, + resource: u32, + idx: u32, +) -> Result { let resource = TypeResourceTableIndex::from_u32(resource); ComponentInstance::from_vmctx(vmctx, |instance| instance.resource_rep32(resource, idx)) } unsafe fn resource_drop( - vmctx: *mut VMComponentContext, + vmctx: NonNull, resource: u32, idx: u32, ) -> Result { @@ -521,7 +531,7 @@ unsafe impl HostResultHasUnwindSentinel for ResourceDropRet { } unsafe fn resource_transfer_own( - vmctx: *mut VMComponentContext, + vmctx: NonNull, src_idx: u32, src_table: u32, dst_table: u32, @@ -534,7 +544,7 @@ unsafe fn resource_transfer_own( } unsafe fn resource_transfer_borrow( - vmctx: *mut VMComponentContext, + vmctx: NonNull, src_idx: u32, src_table: u32, dst_table: u32, @@ -546,20 +556,20 @@ unsafe fn resource_transfer_borrow( }) } -unsafe fn resource_enter_call(vmctx: *mut VMComponentContext) { +unsafe fn resource_enter_call(vmctx: NonNull) { ComponentInstance::from_vmctx(vmctx, |instance| instance.resource_enter_call()) } -unsafe fn resource_exit_call(vmctx: *mut VMComponentContext) -> Result<()> { +unsafe fn resource_exit_call(vmctx: NonNull) -> Result<()> { ComponentInstance::from_vmctx(vmctx, |instance| instance.resource_exit_call()) } -unsafe fn trap(_vmctx: *mut VMComponentContext, code: u8) -> Result { +unsafe fn trap(_vmctx: NonNull, code: u8) -> Result { Err(wasmtime_environ::Trap::from_u8(code).unwrap().into()) } unsafe fn future_transfer( - vmctx: *mut VMComponentContext, + vmctx: NonNull, src_idx: u32, src_table: u32, dst_table: u32, @@ -569,7 +579,7 @@ unsafe fn future_transfer( } unsafe fn stream_transfer( - vmctx: *mut VMComponentContext, + vmctx: NonNull, src_idx: u32, src_table: u32, dst_table: u32, @@ -579,7 +589,7 @@ unsafe fn stream_transfer( } unsafe fn error_context_transfer( - vmctx: *mut VMComponentContext, + vmctx: NonNull, src_idx: u32, src_table: u32, dst_table: u32, diff --git a/crates/wasmtime/src/runtime/vm/const_expr.rs b/crates/wasmtime/src/runtime/vm/const_expr.rs index af1f6b9e1763..64f78f818d15 100644 --- a/crates/wasmtime/src/runtime/vm/const_expr.rs +++ b/crates/wasmtime/src/runtime/vm/const_expr.rs @@ -33,11 +33,7 @@ impl<'a> ConstEvalContext<'a> { fn global_get(&mut self, store: &mut AutoAssertNoGc<'_>, index: GlobalIndex) -> Result { unsafe { - let global = self - .instance - .defined_or_imported_global_ptr(index) - .as_ref() - .unwrap(); + let global = self.instance.defined_or_imported_global_ptr(index).as_ref(); global.to_val_raw(store, self.instance.env_module().globals[index].wasm_ty) } } diff --git a/crates/wasmtime/src/runtime/vm/debug_builtins.rs b/crates/wasmtime/src/runtime/vm/debug_builtins.rs index 2f765f467d4a..733d1bdbc788 100644 --- a/crates/wasmtime/src/runtime/vm/debug_builtins.rs +++ b/crates/wasmtime/src/runtime/vm/debug_builtins.rs @@ -2,10 +2,11 @@ use crate::runtime::vm::instance::Instance; use crate::runtime::vm::vmcontext::VMContext; +use core::ptr::NonNull; use wasmtime_environ::{EntityRef, 
MemoryIndex}; use wasmtime_versioned_export_macros::versioned_export; -static mut VMCTX_AND_MEMORY: (*mut VMContext, usize) = (std::ptr::null_mut(), 0); +static mut VMCTX_AND_MEMORY: (NonNull, usize) = (NonNull::dangling(), 0); // These implementatations are referenced from C code in "helpers.c". The symbols defined // there (prefixed by "wasmtime_") are the real 'public' interface used in the debug info. @@ -14,7 +15,7 @@ static mut VMCTX_AND_MEMORY: (*mut VMContext, usize) = (std::ptr::null_mut(), 0) pub unsafe extern "C" fn resolve_vmctx_memory_ptr(p: *const u32) -> *const u8 { let ptr = std::ptr::read(p); assert!( - !VMCTX_AND_MEMORY.0.is_null(), + VMCTX_AND_MEMORY.0 != NonNull::dangling(), "must call `__vmctx->set()` before resolving Wasm pointers" ); Instance::from_vmctx(VMCTX_AND_MEMORY.0, |handle| { @@ -24,14 +25,14 @@ pub unsafe extern "C" fn resolve_vmctx_memory_ptr(p: *const u32) -> *const u8 { ); let index = MemoryIndex::new(VMCTX_AND_MEMORY.1); let mem = handle.get_memory(index); - mem.base.add(ptr as usize) + mem.base.as_ptr().add(ptr as usize) }) } #[versioned_export] pub unsafe extern "C" fn set_vmctx_memory(vmctx_ptr: *mut VMContext) { // TODO multi-memory - VMCTX_AND_MEMORY = (vmctx_ptr, 0); + VMCTX_AND_MEMORY = (NonNull::new(vmctx_ptr).unwrap(), 0); } /// A bit of a hack around various linkage things. The goal here is to force the diff --git a/crates/wasmtime/src/runtime/vm/export.rs b/crates/wasmtime/src/runtime/vm/export.rs index 526f714d059e..45c4638ed7b2 100644 --- a/crates/wasmtime/src/runtime/vm/export.rs +++ b/crates/wasmtime/src/runtime/vm/export.rs @@ -45,9 +45,9 @@ impl From for Export { #[derive(Debug, Clone)] pub struct ExportTable { /// The address of the table descriptor. - pub definition: *mut VMTableDefinition, + pub definition: NonNull, /// Pointer to the containing `VMContext`. - pub vmctx: *mut VMContext, + pub vmctx: NonNull, /// The table declaration, used for compatibility checking. pub table: Table, } @@ -66,9 +66,9 @@ impl From for Export { #[derive(Debug, Clone)] pub struct ExportMemory { /// The address of the memory descriptor. - pub definition: *mut VMMemoryDefinition, + pub definition: NonNull, /// Pointer to the containing `VMContext`. - pub vmctx: *mut VMContext, + pub vmctx: NonNull, /// The memory declaration, used for compatibility checking. pub memory: Memory, /// The index at which the memory is defined within the `vmctx`. @@ -89,10 +89,10 @@ impl From for Export { #[derive(Debug, Clone)] pub struct ExportGlobal { /// The address of the global storage. - pub definition: *mut VMGlobalDefinition, + pub definition: NonNull, /// Pointer to the containing `VMContext`. May be null for host-created /// globals. - pub vmctx: *mut VMContext, + pub vmctx: Option>, /// The global declaration, used for compatibility checking. 
pub global: Global, } diff --git a/crates/wasmtime/src/runtime/vm/gc/enabled/drc.rs b/crates/wasmtime/src/runtime/vm/gc/enabled/drc.rs index 29c02e79e59f..d7f4cb5d7fdf 100644 --- a/crates/wasmtime/src/runtime/vm/gc/enabled/drc.rs +++ b/crates/wasmtime/src/runtime/vm/gc/enabled/drc.rs @@ -667,9 +667,9 @@ unsafe impl GcHeap for DrcHeap { }) } - unsafe fn vmctx_gc_heap_data(&self) -> *mut u8 { - let ptr = &*self.activations_table as *const VMGcRefActivationsTable; - ptr.cast_mut().cast::() + unsafe fn vmctx_gc_heap_data(&self) -> NonNull { + let ptr: NonNull = NonNull::from(&*self.activations_table); + ptr.cast() } #[cfg(feature = "pooling-allocator")] diff --git a/crates/wasmtime/src/runtime/vm/gc/enabled/null.rs b/crates/wasmtime/src/runtime/vm/gc/enabled/null.rs index 776b3aabd1fb..b62d9494c6ee 100644 --- a/crates/wasmtime/src/runtime/vm/gc/enabled/null.rs +++ b/crates/wasmtime/src/runtime/vm/gc/enabled/null.rs @@ -14,6 +14,7 @@ use crate::{ }, GcHeapOutOfMemory, }; +use core::ptr::NonNull; use core::{ alloc::Layout, any::Any, @@ -309,8 +310,8 @@ unsafe impl GcHeap for NullHeap { Box::new(NullCollection {}) } - unsafe fn vmctx_gc_heap_data(&self) -> *mut u8 { - self.next.get().cast() + unsafe fn vmctx_gc_heap_data(&self) -> NonNull { + NonNull::new(self.next.get()).unwrap().cast() } #[cfg(feature = "pooling-allocator")] diff --git a/crates/wasmtime/src/runtime/vm/gc/gc_runtime.rs b/crates/wasmtime/src/runtime/vm/gc/gc_runtime.rs index b54f0e91a026..46c11adffd73 100644 --- a/crates/wasmtime/src/runtime/vm/gc/gc_runtime.rs +++ b/crates/wasmtime/src/runtime/vm/gc/gc_runtime.rs @@ -5,6 +5,7 @@ use crate::runtime::vm::{ ExternRefHostDataId, ExternRefHostDataTable, GcHeapObject, SendSyncPtr, TypedGcRef, VMArrayRef, VMExternRef, VMGcHeader, VMGcObjectDataMut, VMGcRef, VMStructRef, }; +use core::ptr::NonNull; use core::{ alloc::Layout, any::Any, cell::UnsafeCell, marker, mem, num::NonZeroUsize, ops::Range, ptr, }; @@ -353,7 +354,7 @@ pub unsafe trait GcHeap: 'static + Send + Sync { /// /// The returned pointer, if any, must remain valid as long as `self` is not /// dropped. - unsafe fn vmctx_gc_heap_data(&self) -> *mut u8; + unsafe fn vmctx_gc_heap_data(&self) -> NonNull; //////////////////////////////////////////////////////////////////////////// // Recycling GC Heap Methods diff --git a/crates/wasmtime/src/runtime/vm/instance.rs b/crates/wasmtime/src/runtime/vm/instance.rs index f5851bb6ff7b..1f6049003794 100644 --- a/crates/wasmtime/src/runtime/vm/instance.rs +++ b/crates/wasmtime/src/runtime/vm/instance.rs @@ -13,7 +13,7 @@ use crate::runtime::vm::vmcontext::{ }; use crate::runtime::vm::{ ExportFunction, ExportGlobal, ExportMemory, ExportTable, GcStore, Imports, ModuleRuntimeInfo, - SendSyncPtr, VMFunctionBody, VMGcRef, VMStore, VMStoreRawPtr, WasmFault, + SendSyncPtr, VMFunctionBody, VMGcRef, VMStore, VMStoreRawPtr, VmPtr, VmSafe, WasmFault, }; use crate::store::{StoreInner, StoreOpaque}; use crate::{prelude::*, StoreContextMut}; @@ -112,17 +112,15 @@ impl InstanceAndStore { /// See also the safety discussion in this type's documentation. 
#[inline] pub(crate) unsafe fn from_vmctx( - vmctx: *mut VMContext, + vmctx: NonNull, f: impl for<'a> FnOnce(&'a mut Self) -> R, ) -> R { - debug_assert!(!vmctx.is_null()); - const _: () = assert!(mem::size_of::() == mem::size_of::()); - let ptr = vmctx + let mut ptr = vmctx .byte_sub(mem::size_of::()) .cast::(); - f(&mut *ptr) + f(ptr.as_mut()) } /// Unpacks this `InstanceAndStore` into its underlying `Instance` and `dyn @@ -366,12 +364,14 @@ impl Instance { /// this can't be called twice on the same `VMContext` to get two active /// pointers to the same `Instance`. #[inline] - pub unsafe fn from_vmctx(vmctx: *mut VMContext, f: impl FnOnce(&mut Instance) -> R) -> R { - debug_assert!(!vmctx.is_null()); - let ptr = vmctx + pub unsafe fn from_vmctx( + vmctx: NonNull, + f: impl FnOnce(&mut Instance) -> R, + ) -> R { + let mut ptr = vmctx .byte_sub(mem::size_of::()) .cast::(); - f(&mut *ptr) + f(ptr.as_mut()) } /// Helper function to access various locations offset from our `*mut @@ -381,14 +381,15 @@ impl Instance { /// /// This method is unsafe because the `offset` must be within bounds of the /// `VMContext` object trailing this instance. - unsafe fn vmctx_plus_offset(&self, offset: impl Into) -> *const T { + unsafe fn vmctx_plus_offset(&self, offset: impl Into) -> *const T { self.vmctx() + .as_ptr() .byte_add(usize::try_from(offset.into()).unwrap()) .cast() } /// Dual of `vmctx_plus_offset`, but for mutability. - unsafe fn vmctx_plus_offset_mut(&mut self, offset: impl Into) -> *mut T { + unsafe fn vmctx_plus_offset_mut(&mut self, offset: impl Into) -> NonNull { self.vmctx() .byte_add(usize::try_from(offset.into()).unwrap()) .cast() @@ -439,18 +440,18 @@ impl Instance { /// Return the indexed `VMTableDefinition`. #[allow(dead_code)] fn table(&mut self, index: DefinedTableIndex) -> VMTableDefinition { - unsafe { *self.table_ptr(index) } + unsafe { self.table_ptr(index).read() } } /// Updates the value for a defined table to `VMTableDefinition`. fn set_table(&mut self, index: DefinedTableIndex, table: VMTableDefinition) { unsafe { - *self.table_ptr(index) = table; + self.table_ptr(index).write(table); } } /// Return the indexed `VMTableDefinition`. - fn table_ptr(&mut self, index: DefinedTableIndex) -> *mut VMTableDefinition { + fn table_ptr(&mut self, index: DefinedTableIndex) -> NonNull { unsafe { self.vmctx_plus_offset_mut(self.offsets().vmctx_vmtable_definition(index)) } } @@ -460,7 +461,7 @@ impl Instance { self.memory(defined_index) } else { let import = self.imported_memory(index); - unsafe { VMMemoryDefinition::load(import.from) } + unsafe { VMMemoryDefinition::load(import.from.as_ptr()) } } } @@ -472,8 +473,9 @@ impl Instance { } else { let import = self.imported_memory(index); unsafe { - let ptr = - Instance::from_vmctx(import.vmctx, |i| i.get_defined_memory(import.index)); + let ptr = Instance::from_vmctx(import.vmctx.as_non_null(), |i| { + i.get_defined_memory(import.index) + }); &mut *ptr } } @@ -481,23 +483,26 @@ impl Instance { /// Return the indexed `VMMemoryDefinition`. fn memory(&self, index: DefinedMemoryIndex) -> VMMemoryDefinition { - unsafe { VMMemoryDefinition::load(self.memory_ptr(index)) } + unsafe { VMMemoryDefinition::load(self.memory_ptr(index).as_ptr()) } } /// Set the indexed memory to `VMMemoryDefinition`. fn set_memory(&self, index: DefinedMemoryIndex, mem: VMMemoryDefinition) { unsafe { - *self.memory_ptr(index) = mem; + self.memory_ptr(index).write(mem); } } /// Return the indexed `VMMemoryDefinition`. 
- fn memory_ptr(&self, index: DefinedMemoryIndex) -> *mut VMMemoryDefinition { - unsafe { *self.vmctx_plus_offset(self.offsets().vmctx_vmmemory_pointer(index)) } + fn memory_ptr(&self, index: DefinedMemoryIndex) -> NonNull { + let vmptr = unsafe { + *self.vmctx_plus_offset::>(self.offsets().vmctx_vmmemory_pointer(index)) + }; + vmptr.as_non_null() } /// Return the indexed `VMGlobalDefinition`. - fn global_ptr(&mut self, index: DefinedGlobalIndex) -> *mut VMGlobalDefinition { + fn global_ptr(&mut self, index: DefinedGlobalIndex) -> NonNull { unsafe { self.vmctx_plus_offset_mut(self.offsets().vmctx_vmglobal_definition(index)) } } @@ -508,11 +513,11 @@ impl Instance { pub(crate) fn defined_or_imported_global_ptr( &mut self, index: GlobalIndex, - ) -> *mut VMGlobalDefinition { + ) -> NonNull { if let Some(index) = self.env_module().defined_global_index(index) { self.global_ptr(index) } else { - self.imported_global(index).from + self.imported_global(index).from.as_non_null() } } @@ -532,7 +537,7 @@ impl Instance { idx, ExportGlobal { definition: self.defined_or_imported_global_ptr(idx), - vmctx: self.vmctx(), + vmctx: Some(self.vmctx()), global: self.env_module().globals[idx], }, ) @@ -552,7 +557,7 @@ impl Instance { let def_idx = module.defined_global_index(global_idx).unwrap(); let global = ExportGlobal { definition: self.global_ptr(def_idx), - vmctx: self.vmctx(), + vmctx: Some(self.vmctx()), global: self.env_module().globals[global_idx], }; (def_idx, global) @@ -561,27 +566,27 @@ impl Instance { /// Return a pointer to the interrupts structure #[inline] - pub fn runtime_limits(&mut self) -> *mut *const VMRuntimeLimits { + pub fn runtime_limits(&mut self) -> NonNull>> { unsafe { self.vmctx_plus_offset_mut(self.offsets().ptr.vmctx_runtime_limits()) } } /// Return a pointer to the global epoch counter used by this instance. - pub fn epoch_ptr(&mut self) -> *mut *const AtomicU64 { + pub fn epoch_ptr(&mut self) -> NonNull>> { unsafe { self.vmctx_plus_offset_mut(self.offsets().ptr.vmctx_epoch_ptr()) } } /// Return a pointer to the GC heap base pointer. - pub fn gc_heap_base(&mut self) -> *mut *mut u8 { + pub fn gc_heap_base(&mut self) -> NonNull>> { unsafe { self.vmctx_plus_offset_mut(self.offsets().ptr.vmctx_gc_heap_base()) } } /// Return a pointer to the GC heap bound. - pub fn gc_heap_bound(&mut self) -> *mut usize { + pub fn gc_heap_bound(&mut self) -> NonNull { unsafe { self.vmctx_plus_offset_mut(self.offsets().ptr.vmctx_gc_heap_bound()) } } /// Return a pointer to the collector-specific heap data. 
- pub fn gc_heap_data(&mut self) -> *mut *mut u8 { + pub fn gc_heap_data(&mut self) -> NonNull>> { unsafe { self.vmctx_plus_offset_mut(self.offsets().ptr.vmctx_gc_heap_data()) } } @@ -589,12 +594,14 @@ impl Instance { self.store = store.map(VMStoreRawPtr); if let Some(mut store) = store { let store = store.as_mut(); - *self.runtime_limits() = store.vmruntime_limits(); - *self.epoch_ptr() = store.engine().epoch_counter(); + self.runtime_limits() + .write(Some(store.vmruntime_limits().into())); + self.epoch_ptr() + .write(Some(NonNull::from(store.engine().epoch_counter()).into())); self.set_gc_heap(store.gc_store_mut().ok()); } else { - *self.runtime_limits() = ptr::null_mut(); - *self.epoch_ptr() = ptr::null_mut(); + self.runtime_limits().write(None); + self.epoch_ptr().write(None); self.set_gc_heap(None); } } @@ -602,24 +609,27 @@ impl Instance { unsafe fn set_gc_heap(&mut self, gc_store: Option<&mut GcStore>) { if let Some(gc_store) = gc_store { let heap = gc_store.gc_heap.heap_slice_mut(); - *self.gc_heap_base() = heap.as_mut_ptr(); - *self.gc_heap_bound() = heap.len(); - *self.gc_heap_data() = gc_store.gc_heap.vmctx_gc_heap_data(); + self.gc_heap_bound().write(heap.len()); + self.gc_heap_base() + .write(Some(NonNull::from(heap).cast().into())); + self.gc_heap_data() + .write(Some(gc_store.gc_heap.vmctx_gc_heap_data().into())); } else { - *self.gc_heap_base() = ptr::null_mut(); - *self.gc_heap_bound() = 0; - *self.gc_heap_data() = ptr::null_mut(); + self.gc_heap_bound().write(0); + self.gc_heap_base().write(None); + self.gc_heap_data().write(None); } } pub(crate) unsafe fn set_callee(&mut self, callee: Option>) { - *self.vmctx_plus_offset_mut(self.offsets().ptr.vmctx_callee()) = - callee.map_or(ptr::null_mut(), |c| c.as_ptr()); + let callee = callee.map(|p| VmPtr::from(p)); + self.vmctx_plus_offset_mut(self.offsets().ptr.vmctx_callee()) + .write(callee); } /// Return a reference to the vmctx used by compiled wasm code. #[inline] - pub fn vmctx(&self) -> *mut VMContext { + pub fn vmctx(&self) -> NonNull { // The definition of this method is subtle but intentional. The goal // here is that effectively this should return `&mut self.vmctx`, but // it's not quite so simple. Some more documentation is available on the @@ -641,7 +651,8 @@ impl Instance { // trait `Strict` but the method names conflict with the nightly methods // so a different syntax is used to invoke methods here. 
let addr = &raw const self.vmctx; - Strict::with_addr(self.vmctx_self_reference.as_ptr(), Strict::addr(addr)) + let ret = Strict::with_addr(self.vmctx_self_reference.as_ptr(), Strict::addr(addr)); + NonNull::new(ret).unwrap() } fn get_exported_func(&mut self, index: FuncIndex) -> ExportFunction { @@ -655,7 +666,7 @@ impl Instance { (self.table_ptr(def_index), self.vmctx()) } else { let import = self.imported_table(index); - (import.from, import.vmctx) + (import.from.as_non_null(), import.vmctx.as_non_null()) }; ExportTable { definition, @@ -670,7 +681,11 @@ impl Instance { (self.memory_ptr(def_index), self.vmctx(), def_index) } else { let import = self.imported_memory(index); - (import.from, import.vmctx, import.index) + ( + import.from.as_non_null(), + import.vmctx.as_non_null(), + import.index, + ) }; ExportMemory { definition, @@ -685,9 +700,9 @@ impl Instance { definition: if let Some(def_index) = self.env_module().defined_global_index(index) { self.global_ptr(def_index) } else { - self.imported_global(index).from + self.imported_global(index).from.as_non_null() }, - vmctx: self.vmctx(), + vmctx: Some(self.vmctx()), global: self.env_module().globals[index], } } @@ -712,7 +727,7 @@ impl Instance { let index = DefinedTableIndex::new( usize::try_from( (table as *const VMTableDefinition) - .offset_from(self.table_ptr(DefinedTableIndex::new(0))), + .offset_from(self.table_ptr(DefinedTableIndex::new(0)).as_ptr()), ) .unwrap(), ); @@ -741,7 +756,7 @@ impl Instance { None => { let import = self.imported_memory(index); unsafe { - Instance::from_vmctx(import.vmctx, |i| { + Instance::from_vmctx(import.vmctx.as_non_null(), |i| { i.defined_memory_grow(store, import.index, delta) }) } @@ -821,6 +836,10 @@ impl Instance { Layout::from_size_align(size, align).unwrap() } + fn type_ids_array(&mut self) -> NonNull> { + unsafe { self.vmctx_plus_offset_mut(self.offsets().ptr.vmctx_type_ids_array()) } + } + /// Construct a new VMFuncRef for the given function /// (imported or defined in this module) and store into the given /// location. Used during lazy initialization. @@ -837,8 +856,7 @@ impl Instance { into: *mut VMFuncRef, ) { let type_index = unsafe { - let base: *const VMSharedTypeIndex = - *self.vmctx_plus_offset_mut(self.offsets().ptr.vmctx_type_ids_array()); + let base = self.type_ids_array().read().as_ptr(); *base.add(sig.index()) }; @@ -847,9 +865,10 @@ impl Instance { array_call: self .runtime_info .array_to_wasm_trampoline(def_index) - .expect("should have array-to-Wasm trampoline for escaping function"), - wasm_call: Some(self.runtime_info.function(def_index)), - vmctx: VMOpaqueContext::from_vmcontext(self.vmctx()), + .expect("should have array-to-Wasm trampoline for escaping function") + .into(), + wasm_call: Some(self.runtime_info.function(def_index).into()), + vmctx: VMOpaqueContext::from_vmcontext(self.vmctx()).into(), type_index, } } else { @@ -909,11 +928,11 @@ impl Instance { // all! let func = &self.env_module().functions[index]; let sig = func.signature; - let func_ref: *mut VMFuncRef = self + let func_ref = self .vmctx_plus_offset_mut::(self.offsets().vmctx_func_ref(func.func_ref)); - self.construct_func_ref(index, sig, func_ref); + self.construct_func_ref(index, sig, func_ref.as_ptr()); - Some(NonNull::new(func_ref).unwrap()) + Some(func_ref) } } @@ -1092,8 +1111,8 @@ impl Instance { // Bounds and casts are checked above, by this point we know that // everything is safe. 
unsafe { - let dst = dst_mem.base.add(dst); - let src = src_mem.base.add(src); + let dst = dst_mem.base.as_ptr().add(dst); + let src = src_mem.base.as_ptr().add(src); // FIXME audit whether this is safe in the presence of shared memory // (https://github.com/bytecodealliance/wasmtime/issues/4203). ptr::copy(src, dst, len); @@ -1134,7 +1153,7 @@ impl Instance { // Bounds and casts are checked above, by this point we know that // everything is safe. unsafe { - let dst = memory.base.add(dst); + let dst = memory.base.as_ptr().add(dst); // FIXME audit whether this is safe in the presence of shared memory // (https://github.com/bytecodealliance/wasmtime/issues/4203). ptr::write_bytes(dst, val, len); @@ -1196,7 +1215,7 @@ impl Instance { unsafe { let src_start = data.as_ptr().add(src); - let dst_start = memory.base.add(dst); + let dst_start = memory.base.as_ptr().add(dst); // FIXME audit whether this is safe in the presence of shared memory // (https://github.com/bytecodealliance/wasmtime/issues/4203). ptr::copy_nonoverlapping(src_start, dst_start, len); @@ -1309,8 +1328,8 @@ impl Instance { } else { let import = self.imported_table(index); unsafe { - Instance::from_vmctx(import.vmctx, |foreign_instance| { - let foreign_table_def = import.from; + Instance::from_vmctx(import.vmctx.as_non_null(), |foreign_instance| { + let foreign_table_def = import.from.as_ptr(); let foreign_table_index = foreign_instance.table_index(&*foreign_table_def); f(foreign_table_index, foreign_instance) }) @@ -1332,41 +1351,47 @@ impl Instance { ) { assert!(ptr::eq(module, self.env_module().as_ref())); - *self.vmctx_plus_offset_mut(offsets.ptr.vmctx_magic()) = VMCONTEXT_MAGIC; + self.vmctx_plus_offset_mut(offsets.ptr.vmctx_magic()) + .write(VMCONTEXT_MAGIC); self.set_callee(None); self.set_store(store.as_raw()); // Initialize shared types - let types = self.runtime_info.type_ids(); - *self.vmctx_plus_offset_mut(offsets.ptr.vmctx_type_ids_array()) = types.as_ptr(); + let types = NonNull::from(self.runtime_info.type_ids()); + self.type_ids_array().write(types.cast().into()); // Initialize the built-in functions - *self.vmctx_plus_offset_mut(offsets.ptr.vmctx_builtin_functions()) = - &VMBuiltinFunctionsArray::INIT; + let ptr: NonNull = NonNull::from(&VMBuiltinFunctionsArray::INIT); + self.vmctx_plus_offset_mut(offsets.ptr.vmctx_builtin_functions()) + .write(VmPtr::from(ptr)); // Initialize the imports debug_assert_eq!(imports.functions.len(), module.num_imported_funcs); ptr::copy_nonoverlapping( imports.functions.as_ptr(), - self.vmctx_plus_offset_mut(offsets.vmctx_imported_functions_begin()), + self.vmctx_plus_offset_mut(offsets.vmctx_imported_functions_begin()) + .as_ptr(), imports.functions.len(), ); debug_assert_eq!(imports.tables.len(), module.num_imported_tables); ptr::copy_nonoverlapping( imports.tables.as_ptr(), - self.vmctx_plus_offset_mut(offsets.vmctx_imported_tables_begin()), + self.vmctx_plus_offset_mut(offsets.vmctx_imported_tables_begin()) + .as_ptr(), imports.tables.len(), ); debug_assert_eq!(imports.memories.len(), module.num_imported_memories); ptr::copy_nonoverlapping( imports.memories.as_ptr(), - self.vmctx_plus_offset_mut(offsets.vmctx_imported_memories_begin()), + self.vmctx_plus_offset_mut(offsets.vmctx_imported_memories_begin()) + .as_ptr(), imports.memories.len(), ); debug_assert_eq!(imports.globals.len(), module.num_imported_globals); ptr::copy_nonoverlapping( imports.globals.as_ptr(), - self.vmctx_plus_offset_mut(offsets.vmctx_imported_globals_begin()), + 
self.vmctx_plus_offset_mut(offsets.vmctx_imported_globals_begin()) + .as_ptr(), imports.globals.len(), ); @@ -1378,7 +1403,7 @@ impl Instance { // Initialize the defined tables let mut ptr = self.vmctx_plus_offset_mut(offsets.vmctx_tables_begin()); for i in 0..module.num_defined_tables() { - ptr::write(ptr, self.tables[DefinedTableIndex::new(i)].1.vmtable()); + ptr.write(self.tables[DefinedTableIndex::new(i)].1.vmtable()); ptr = ptr.add(1); } @@ -1398,10 +1423,10 @@ impl Instance { .as_shared_memory() .unwrap() .vmmemory_ptr(); - ptr::write(ptr, def_ptr.cast_mut()); + ptr.write(VmPtr::from(def_ptr)); } else { - ptr::write(owned_ptr, self.memories[defined_memory_index].1.vmmemory()); - ptr::write(ptr, owned_ptr); + owned_ptr.write(self.memories[defined_memory_index].1.vmmemory()); + ptr.write(VmPtr::from(owned_ptr)); owned_ptr = owned_ptr.add(1); } ptr = ptr.add(1); @@ -1412,7 +1437,7 @@ impl Instance { // with their const expression initializers after the instance is fully // allocated. for (index, _init) in module.global_initializers.iter() { - ptr::write(self.global_ptr(index), VMGlobalDefinition::new()); + self.global_ptr(index).write(VMGlobalDefinition::new()); } } @@ -1449,7 +1474,7 @@ impl InstanceHandle { /// Return a raw pointer to the vmctx used by compiled wasm code. #[inline] - pub fn vmctx(&self) -> *mut VMContext { + pub fn vmctx(&self) -> NonNull { self.instance().vmctx() } diff --git a/crates/wasmtime/src/runtime/vm/instance/allocator.rs b/crates/wasmtime/src/runtime/vm/instance/allocator.rs index e7e2e369d1d7..6b32581617b4 100644 --- a/crates/wasmtime/src/runtime/vm/instance/allocator.rs +++ b/crates/wasmtime/src/runtime/vm/instance/allocator.rs @@ -735,7 +735,7 @@ fn initialize_memories( unsafe { let src = self.context.instance.wasm_data(init.data.clone()); let offset = usize::try_from(init.offset).unwrap(); - let dst = memory.base.add(offset); + let dst = memory.base.as_ptr().add(offset); assert!(offset + src.len() <= memory.current_length()); @@ -812,10 +812,7 @@ fn initialize_globals( // This write is safe because we know we have the correct module for // this instance and its vmctx due to the assert above. unsafe { - ptr::write( - to, - VMGlobalDefinition::from_val_raw(&mut store, wasm_ty, raw)?, - ) + to.write(VMGlobalDefinition::from_val_raw(&mut store, wasm_ty, raw)?); }; } Ok(()) diff --git a/crates/wasmtime/src/runtime/vm/interpreter.rs b/crates/wasmtime/src/runtime/vm/interpreter.rs index 28a73b09a630..bd0973fdbd87 100644 --- a/crates/wasmtime/src/runtime/vm/interpreter.rs +++ b/crates/wasmtime/src/runtime/vm/interpreter.rs @@ -85,15 +85,15 @@ impl InterpreterRef<'_> { pub unsafe fn call( mut self, mut bytecode: NonNull, - callee: *mut VMOpaqueContext, - caller: *mut VMOpaqueContext, - args_and_results: *mut [ValRaw], + callee: NonNull, + caller: NonNull, + args_and_results: NonNull<[ValRaw]>, ) -> bool { // Initialize argument registers with the ABI arguments. 
let args = [ - XRegVal::new_ptr(callee).into(), - XRegVal::new_ptr(caller).into(), - XRegVal::new_ptr(args_and_results.cast::()).into(), + XRegVal::new_ptr(callee.as_ptr()).into(), + XRegVal::new_ptr(caller.as_ptr()).into(), + XRegVal::new_ptr(args_and_results.cast::().as_ptr()).into(), XRegVal::new_u64(args_and_results.len() as u64).into(), ]; @@ -321,6 +321,7 @@ impl InterpreterRef<'_> { (@get vmctx $reg:ident) => (self.0[$reg].get_ptr()); (@get pointer $reg:ident) => (self.0[$reg].get_ptr()); (@get ptr $reg:ident) => (self.0[$reg].get_ptr()); + (@get nonnull $reg:ident) => (NonNull::new(self.0[$reg].get_ptr()).unwrap()); (@get ptr_u8 $reg:ident) => (self.0[$reg].get_ptr()); (@get ptr_u16 $reg:ident) => (self.0[$reg].get_ptr()); (@get ptr_size $reg:ident) => (self.0[$reg].get_ptr()); @@ -352,7 +353,7 @@ impl InterpreterRef<'_> { // if id == const { HostCall::ArrayCall.index() } { - call!(@host VMArrayCallNative(ptr, ptr, ptr, size) -> bool); + call!(@host VMArrayCallNative(nonnull, nonnull, nonnull, size) -> bool); } macro_rules! core { @@ -378,7 +379,7 @@ impl InterpreterRef<'_> { use wasmtime_environ::component::ComponentBuiltinFunctionIndex; if id == const { HostCall::ComponentLowerImport.index() } { - call!(@host VMLoweringCallee(ptr, ptr, u32, ptr, ptr, ptr, u8, ptr, size) -> bool); + call!(@host VMLoweringCallee(nonnull, nonnull, u32, nonnull, ptr, ptr, u8, nonnull, size) -> bool); } macro_rules! component { diff --git a/crates/wasmtime/src/runtime/vm/interpreter_disabled.rs b/crates/wasmtime/src/runtime/vm/interpreter_disabled.rs index e0fcc7b30c79..50a3b15754e3 100644 --- a/crates/wasmtime/src/runtime/vm/interpreter_disabled.rs +++ b/crates/wasmtime/src/runtime/vm/interpreter_disabled.rs @@ -40,9 +40,9 @@ impl InterpreterRef<'_> { pub unsafe fn call( self, _bytecode: NonNull, - _callee: *mut VMOpaqueContext, - _caller: *mut VMOpaqueContext, - _args_and_results: *mut [ValRaw], + _callee: NonNull, + _caller: NonNull, + _args_and_results: NonNull<[ValRaw]>, ) -> bool { match self.empty {} } diff --git a/crates/wasmtime/src/runtime/vm/libcalls.rs b/crates/wasmtime/src/runtime/vm/libcalls.rs index c61168c437ac..ae08c0771fd8 100644 --- a/crates/wasmtime/src/runtime/vm/libcalls.rs +++ b/crates/wasmtime/src/runtime/vm/libcalls.rs @@ -90,7 +90,7 @@ pub mod raw { // between doc comments and `cfg`s. #![allow(unused_doc_comments, unused_attributes)] - use crate::runtime::vm::{InstanceAndStore, VMContext}; + use crate::runtime::vm::{InstanceAndStore, VMContext, VmPtr}; macro_rules! libcall { ( @@ -108,13 +108,13 @@ pub mod raw { // with conversion of the return value in the face of traps. #[allow(unused_variables, missing_docs)] pub unsafe extern "C" fn $name( - vmctx: *mut VMContext, + vmctx: VmPtr, $( $pname : libcall!(@ty $param), )* ) $(-> libcall!(@ty $result))? { $(#[cfg($attr)])? { crate::runtime::vm::traphandlers::catch_unwind_and_record_trap(|| { - InstanceAndStore::from_vmctx(vmctx, |pair| { + InstanceAndStore::from_vmctx(vmctx.as_non_null(), |pair| { let (instance, store) = pair.unpack_mut(); super::$name(store, instance, $($pname),*) }) @@ -133,7 +133,7 @@ pub mod raw { const _: () = { #[used] static I_AM_USED: unsafe extern "C" fn( - *mut VMContext, + VmPtr, $( $pname : libcall!(@ty $param), )* ) $( -> libcall!(@ty $result))? 
= $name; }; diff --git a/crates/wasmtime/src/runtime/vm/memory.rs b/crates/wasmtime/src/runtime/vm/memory.rs index 277e3cee5f68..5d4f595eec14 100644 --- a/crates/wasmtime/src/runtime/vm/memory.rs +++ b/crates/wasmtime/src/runtime/vm/memory.rs @@ -199,14 +199,20 @@ impl MemoryBase { Self::Raw(NonNull::new(ptr).expect("pointer is non-null").into()) } - /// Returns the actual memory address in memory that is represented by this base. - pub fn as_mut_ptr(&self) -> *mut u8 { + /// Returns the actual memory address in memory that is represented by this + /// base. + pub fn as_non_null(&self) -> NonNull { match self { - Self::Raw(ptr) => ptr.as_ptr(), + Self::Raw(ptr) => ptr.as_non_null(), #[cfg(has_virtual_memory)] - Self::Mmap(mmap_offset) => mmap_offset.as_mut_ptr(), + Self::Mmap(mmap_offset) => mmap_offset.as_non_null(), } } + + /// Same as `as_non_null`, but different return type. + pub fn as_mut_ptr(&self) -> *mut u8 { + self.as_non_null().as_ptr() + } } /// Representation of a runtime wasm linear memory. @@ -689,7 +695,7 @@ impl LocalMemory { pub fn vmmemory(&mut self) -> VMMemoryDefinition { VMMemoryDefinition { - base: self.alloc.base().as_mut_ptr(), + base: self.alloc.base().as_non_null().into(), current_length: self.alloc.byte_size().into(), } } @@ -747,5 +753,5 @@ pub fn validate_atomic_addr( } let addr = usize::try_from(addr).unwrap(); - Ok(def.base.wrapping_add(addr)) + Ok(def.base.as_ptr().wrapping_add(addr)) } diff --git a/crates/wasmtime/src/runtime/vm/memory/shared_memory.rs b/crates/wasmtime/src/runtime/vm/memory/shared_memory.rs index 6412c325694d..66ecdde087a8 100644 --- a/crates/wasmtime/src/runtime/vm/memory/shared_memory.rs +++ b/crates/wasmtime/src/runtime/vm/memory/shared_memory.rs @@ -5,6 +5,7 @@ use crate::runtime::vm::vmcontext::VMMemoryDefinition; use crate::runtime::vm::{Memory, VMStore, WaitResult}; use std::cell::RefCell; use std::ops::Range; +use std::ptr::NonNull; use std::sync::atomic::{AtomicU32, AtomicU64, Ordering}; use std::sync::{Arc, RwLock}; use std::time::{Duration, Instant}; @@ -63,8 +64,8 @@ impl SharedMemory { } /// Return a pointer to the shared memory's [VMMemoryDefinition]. - pub fn vmmemory_ptr(&self) -> *const VMMemoryDefinition { - &self.0.def.0 + pub fn vmmemory_ptr(&self) -> NonNull { + NonNull::from(&self.0.def.0) } /// Same as `RuntimeLinearMemory::grow`, except with `&self`. 
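The memory hunks above keep the base as a `NonNull<u8>` (`MemoryBase::as_non_null`, `MmapOffset::as_non_null`) and only fall back to `*mut u8` at the last moment (`validate_atomic_addr`, the `ptr::copy` paths). A self-contained sketch of that shape, with a hypothetical `MemDef` standing in for `VMMemoryDefinition` (not the real type, which also carries an atomic length and a trap on failure):

    use core::ptr::NonNull;

    // Hypothetical stand-in for `VMMemoryDefinition`: non-null base plus a length.
    struct MemDef {
        base: NonNull<u8>,
        current_length: usize,
    }

    // Mirrors the shape of `validate_atomic_addr` above: bounds-check first,
    // then derive the raw host pointer from the non-null base.
    fn checked_ptr(def: &MemDef, addr: usize, access_size: usize) -> Option<*mut u8> {
        if addr.checked_add(access_size)? > def.current_length {
            return None;
        }
        Some(def.base.as_ptr().wrapping_add(addr))
    }

    fn main() {
        let mut bytes = [0u8; 16];
        let len = bytes.len();
        let def = MemDef {
            base: NonNull::from(&mut bytes[..]).cast(),
            current_length: len,
        };
        assert!(checked_ptr(&def, 8, 4).is_some());
        assert!(checked_ptr(&def, 15, 4).is_none());
    }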
diff --git a/crates/wasmtime/src/runtime/vm/memory/shared_memory_disabled.rs b/crates/wasmtime/src/runtime/vm/memory/shared_memory_disabled.rs index fd88d1c070cd..936f4ad484a7 100644 --- a/crates/wasmtime/src/runtime/vm/memory/shared_memory_disabled.rs +++ b/crates/wasmtime/src/runtime/vm/memory/shared_memory_disabled.rs @@ -4,6 +4,7 @@ use crate::prelude::*; use crate::runtime::vm::memory::LocalMemory; use crate::runtime::vm::{VMMemoryDefinition, VMStore, WaitResult}; use core::ops::Range; +use core::ptr::NonNull; use core::time::Duration; use wasmtime_environ::{Trap, Tunables}; @@ -23,7 +24,7 @@ impl SharedMemory { match self {} } - pub fn vmmemory_ptr(&self) -> *const VMMemoryDefinition { + pub fn vmmemory_ptr(&self) -> NonNull { match *self {} } diff --git a/crates/wasmtime/src/runtime/vm/mmap.rs b/crates/wasmtime/src/runtime/vm/mmap.rs index 78e50939c26b..5e43190fb5eb 100644 --- a/crates/wasmtime/src/runtime/vm/mmap.rs +++ b/crates/wasmtime/src/runtime/vm/mmap.rs @@ -6,6 +6,7 @@ use crate::prelude::*; use crate::runtime::vm::sys::{mmap, vm::MemoryImageSource}; use alloc::sync::Arc; use core::ops::Range; +use core::ptr::NonNull; #[cfg(feature = "std")] use std::fs::File; @@ -261,6 +262,12 @@ impl Mmap { self.sys.as_send_sync_ptr().as_ptr() } + /// Return the allocated memory as a mutable pointer to u8. + #[inline] + pub fn as_non_null(&self) -> NonNull { + self.sys.as_send_sync_ptr().as_non_null() + } + /// Return the length of the allocated memory. /// /// This is the byte length of this entire mapping which includes both @@ -383,8 +390,14 @@ impl MmapOffset { /// Returns the raw pointer in memory represented by this offset. #[inline] pub fn as_mut_ptr(&self) -> *mut u8 { + self.as_non_null().as_ptr() + } + + /// Returns the raw pointer in memory represented by this offset. + #[inline] + pub fn as_non_null(&self) -> NonNull { // SAFETY: constructor checks that offset is within this allocation. - unsafe { self.mmap().as_mut_ptr().byte_add(self.offset.byte_count()) } + unsafe { self.mmap().as_non_null().byte_add(self.offset.byte_count()) } } /// Maps an image into the mmap with read/write permissions. diff --git a/crates/wasmtime/src/runtime/vm/provenance.rs b/crates/wasmtime/src/runtime/vm/provenance.rs new file mode 100644 index 000000000000..d8bde1497131 --- /dev/null +++ b/crates/wasmtime/src/runtime/vm/provenance.rs @@ -0,0 +1,210 @@ +//! Helpers related to pointer provenance for Wasmtime and its runtime. +//! +//! This module encapsulates the efforts and lengths that Wasmtime goes to in +//! order to properly respect pointer provenance in Rust with respect to unsafe +//! code. Wasmtime has a nontrivial amount of `unsafe` code and when/where +//! pointers are valid is something we need to be particularly careful about. +//! All safe Rust does not need to worry about this module and only the unsafe +//! runtime bits need to worry about it. +//! +//! In general Wasmtime does not work with Rust's strict pointer provenance +//! rules. The primary reason for this is that Cranelift does not have the +//! concept of a pointer type meaning that backends cannot know what values are +//! pointers and what aren't. This isn't a huge issue for ISAs like x64 but for +//! an ISA like Pulley Bytecode it means that the Pulley interpreter cannot +//! respect strict provenance. +//! +//! > **Aside**: an example of how Pulley can't respect pointer provenance is +//! > consider a wasm load. The wasm load will add a wasm address to the base +//! > address of the host. 
In this situation what actually needs to happen is +//! > that the base address of the host is a pointer which is byte-offset'd by +//! > the wasm address. Cranelift IR has no knowledge of which value is +//! > the wasm address and which is the host address. This means that Cranelift +//! > can freely commute the operands of the addition. This means that when +//! > executing, Pulley doesn't know which values are addresses and which aren't. +//! +//! This isn't the end of the world for Wasmtime, however, it just means that +//! when we run in MIRI we are restricted to "permissive provenance" or "exposed +//! provenance". The tl;dr of exposed provenance is that at certain points we +//! declare a pointer as "this is now exposed". That converts a pointer to the +//! `usize` address and then semantically (just for rustc/llvm mostly) indicates +//! that the provenance of the pointer is added to a global list of provenances. +//! Later on Wasmtime will execute an operation to convert a `usize` back into a +//! pointer which will pick "the most appropriate provenance" from said global +//! list of provenances. +//! +//! In practice we expect that at runtime all of these provenance-related ops +//! are no-ops and compile away to nothing. The only practical effect that's +//! expected is that some optimizations may be hindered in LLVM occasionally or +//! something like that, which is by-and-large what we want to happen. Note that +//! another practical consequence of not working with "strict provenance" is +//! that Wasmtime is incompatible with platforms such as CHERI where exposed +//! provenance is not available. + +use crate::vm::SendSyncPtr; +use core::fmt; +use core::ptr::NonNull; +use core::sync::atomic::{AtomicU64, AtomicUsize}; +use wasmtime_environ::VMSharedTypeIndex; + +/// A pointer that is used by compiled code, or in other words is accessed +/// outside of Rust. +/// +/// This is intended to be the fundamental data type used to share +/// pointers-to-things with compiled wasm code for example. An example +/// of this is that the `VMMemoryDefinition` type, which compiled code reads to +/// learn about linear memory, uses a `VmPtr` to represent the base pointer +/// of linear memory. +/// +/// This type is pointer-sized and typed-like-a-pointer. This is additionally +/// like a `NonNull<T>` in that it's never a null pointer (and +/// `Option<VmPtr<T>>` is pointer-sized). This pointer auto-infers +/// `Send` and `Sync` based on `T`. Note the lack of `T: ?Sized` bounds in this +/// type additionally, meaning that it only works with sized types. That's +/// intentional as compiled code should not be interacting with dynamically +/// sized types in Rust. +/// +/// This type serves two major purposes with respect to provenance and safety: +/// +/// * Primarily this type is the only pointer type that implements `VmSafe`, the +/// marker trait below. That forces all pointers shared with compiled code to +/// use this type. +/// +/// * This type represents a pointer with "exposed provenance". Once a value of +/// this type is created the original pointer's provenance will be marked as +/// exposed. This operation may hinder optimizations around the use of said +/// pointer in that case. +/// +/// This type is expected to be used not only when sending pointers to compiled +/// code (e.g. `VMContext`) but additionally for any data at rest which shares +/// pointers with compiled code (for example the base of linear memory or +/// pointers stored within `VMContext` itself).
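As a concrete illustration of the expose/reacquire round trip the module docs above describe (a standalone sketch using the standard library's exposed-provenance APIs on a recent Rust, not Wasmtime's `VmPtr` itself):

    fn main() {
        let mut slot: u64 = 5;
        let p: *mut u64 = &mut slot;

        // "Expose" the pointer: its provenance is recorded and only the integer
        // address travels onward (e.g. into state that compiled code reads).
        let addr: usize = p.expose_provenance();

        // Later, rebuild a pointer from that integer. Under exposed provenance the
        // new pointer picks up a previously-exposed provenance that fits the address.
        let q: *mut u64 = core::ptr::with_exposed_provenance_mut(addr);
        unsafe { *q += 1 };
        assert_eq!(slot, 6);
    }

In `VmPtr` terms, constructing the `VmPtr` plays the role of the expose step, and `as_non_null`/`as_ptr` later play the role of the reacquire step.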
+/// +/// In general usage of this type should be minimized to only where absolutely +/// necessary when sharing data structures with compiled code. Prefer to use +/// `NonNull` or `SendSyncPtr` where possible. +#[repr(transparent)] +pub struct VmPtr<T>(SendSyncPtr<T>); + +impl<T> VmPtr<T> { + /// View this pointer as a [`SendSyncPtr`]. + /// + /// This operation will convert the storage at-rest to a native pointer on + /// the host. This is effectively an integer-to-pointer operation which will + /// assume that the original pointer's provenance was previously exposed. + /// In typical operation this means that Wasmtime will initialize data + /// structures by creating an instance of `VmPtr`, exposing provenance. + /// Later on this type will be handed back to Wasmtime or read from its + /// location at-rest in which case provenance will be "re-acquired". + pub fn as_send_sync(&self) -> SendSyncPtr<T> { + self.0 + } + + /// Similar to `as_send_sync`, but returns a `NonNull<T>`. + pub fn as_non_null(&self) -> NonNull<T> { + self.0.as_non_null() + } + + /// Similar to `as_send_sync`, but returns a `*mut T`. + pub fn as_ptr(&self) -> *mut T { + self.0.as_ptr() + } +} + +// `VmPtr`, like raw pointers, is trivially `Clone`/`Copy`. +impl<T> Clone for VmPtr<T> { + fn clone(&self) -> VmPtr<T> { + *self + } +} + +impl<T> Copy for VmPtr<T> {} + +// Forward debugging to `SendSyncPtr` which renders the address. +impl<T> fmt::Debug for VmPtr<T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.as_send_sync().fmt(f) + } +} + +// Constructor from `NonNull` +impl<T> From<NonNull<T>> for VmPtr<T> { + fn from(ptr: NonNull<T>) -> VmPtr<T> { + VmPtr::from(SendSyncPtr::from(ptr)) + } +} + +// Constructor from `SendSyncPtr` +impl<T> From<SendSyncPtr<T>> for VmPtr<T> { + fn from(ptr: SendSyncPtr<T>) -> VmPtr<T> { + VmPtr(ptr) + } +} + +/// A custom "marker trait" used to tag types that are safe to share with +/// compiled wasm code. +/// +/// The intention of this trait is to be used as a bound in a few core locations +/// in Wasmtime, such as `Instance::vmctx_plus_offset_mut`, and otherwise not +/// present very often. The purpose of this trait is to ensure that all types +/// stored to be shared with compiled code have a known layout and are +/// guaranteed to be "safe" to share with compiled wasm code. +/// +/// This is an `unsafe` trait as it's generally not safe to share anything with +/// compiled code and it is used to invite extra scrutiny to manual `impl`s of +/// this trait. Types which implement this marker trait must satisfy at least +/// the following requirements. +/// +/// * The ABI of `Self` must be well-known and defined. This means that the type +/// can interoperate with compiled code. For example `u8` is well defined as +/// is a `#[repr(C)]` structure. Types lacking `#[repr(C)]` or other types +/// like Rust tuples do not satisfy this requirement. +/// +/// * For types which contain pointers the pointer's provenance is guaranteed to +/// have been exposed when the type is constructed. This is satisfied where +/// the only pointer that implements this trait is `VmPtr` above which is +/// explicitly used to indicate exposed provenance. Notably `*mut T` and +/// `NonNull` do not implement this trait, and intentionally so. +/// +/// * For composite structures (e.g. `struct`s in Rust) all member fields must +/// satisfy the above criteria. All fields must have defined layouts and +/// pointers must be `VmPtr`.
+/// +/// * Newtype or wrapper types around primitives that are used by value must be +/// `#[repr(transparent)]` to ensure they aren't considered aggregates by the +/// compile to match the ABI of the primitive type. +/// +/// In this module a number of impls are provided for the primitives of Rust, +/// for example integers. Additionally some basic pointer-related impls are +/// provided for `VmPtr` above. More impls can be found in `vmcontext.rs` +/// where there are manual impls for all `VM*` data structures which are shared +/// with compiled code. +pub unsafe trait VmSafe {} + +// Implementations for primitive types. Note that atomics are included here as +// some atomic values are shared with compiled code. Rust's atomics are +// guaranteed to have the same memory representation as their primitive. +unsafe impl VmSafe for u8 {} +unsafe impl VmSafe for u16 {} +unsafe impl VmSafe for u32 {} +unsafe impl VmSafe for u64 {} +unsafe impl VmSafe for u128 {} +unsafe impl VmSafe for usize {} +unsafe impl VmSafe for i8 {} +unsafe impl VmSafe for i16 {} +unsafe impl VmSafe for i32 {} +unsafe impl VmSafe for i64 {} +unsafe impl VmSafe for i128 {} +unsafe impl VmSafe for isize {} +unsafe impl VmSafe for AtomicUsize {} +unsafe impl VmSafe for AtomicU64 {} + +// This is a small `u32` wrapper defined in `wasmtime-environ`, so impl the +// vm-safe-ness here. +unsafe impl VmSafe for VMSharedTypeIndex {} + +// Core implementations for `VmPtr`. Notably `VMPtr` requires that `T` also +// implements `VmSafe`. Additionally an `Option` wrapper is allowed as that's +// just a nullable pointer. +unsafe impl VmSafe for VmPtr {} +unsafe impl VmSafe for Option> {} diff --git a/crates/wasmtime/src/runtime/vm/store_box.rs b/crates/wasmtime/src/runtime/vm/store_box.rs index 21ac0cdfa14e..0327eba28f71 100644 --- a/crates/wasmtime/src/runtime/vm/store_box.rs +++ b/crates/wasmtime/src/runtime/vm/store_box.rs @@ -1,4 +1,6 @@ use crate::prelude::*; +use crate::runtime::vm::SendSyncPtr; +use core::ptr::NonNull; /// A `Box` lookalike for memory that's stored in a `Store` /// @@ -8,30 +10,28 @@ use crate::prelude::*; /// around without invalidating pointers to the contents within the box. The /// standard `Box` type does not implement this for example and moving that /// will invalidate derived pointers. -pub struct StoreBox(*mut T); - -unsafe impl Send for StoreBox {} -unsafe impl Sync for StoreBox {} +pub struct StoreBox(SendSyncPtr); impl StoreBox { /// Allocates space on the heap to store `val` and returns a pointer to it /// living on the heap. pub fn new(val: T) -> StoreBox { - StoreBox(Box::into_raw(Box::new(val))) + let ptr = Box::into_raw(Box::new(val)); + StoreBox(SendSyncPtr::from(NonNull::new(ptr).unwrap())) } } impl StoreBox { /// Returns the underlying pointer to `T` which is owned by the store. 
- pub fn get(&self) -> *mut T { - self.0 + pub fn get(&self) -> NonNull { + self.0.as_non_null() } } impl Drop for StoreBox { fn drop(&mut self) { unsafe { - drop(Box::from_raw(self.0)); + drop(Box::from_raw(self.0.as_ptr())); } } } diff --git a/crates/wasmtime/src/runtime/vm/sys/custom/traphandlers.rs b/crates/wasmtime/src/runtime/vm/sys/custom/traphandlers.rs index 24d033ac9837..6f9cab742e7b 100644 --- a/crates/wasmtime/src/runtime/vm/sys/custom/traphandlers.rs +++ b/crates/wasmtime/src/runtime/vm/sys/custom/traphandlers.rs @@ -1,6 +1,7 @@ use crate::prelude::*; use crate::runtime::vm::VMContext; use core::mem; +use core::ptr::NonNull; pub use crate::runtime::vm::sys::capi::{self, wasmtime_longjmp}; @@ -9,15 +10,15 @@ pub type SignalHandler = Box; pub unsafe fn wasmtime_setjmp( jmp_buf: *mut *const u8, - callback: extern "C" fn(*mut u8, *mut VMContext) -> bool, + callback: extern "C" fn(*mut u8, NonNull) -> bool, payload: *mut u8, - callee: *mut VMContext, + callee: NonNull, ) -> bool { let callback = mem::transmute::< - extern "C" fn(*mut u8, *mut VMContext) -> bool, + extern "C" fn(*mut u8, NonNull) -> bool, extern "C" fn(*mut u8, *mut u8) -> bool, >(callback); - capi::wasmtime_setjmp(jmp_buf, callback, payload, callee.cast()) + capi::wasmtime_setjmp(jmp_buf, callback, payload, callee.as_ptr().cast()) } #[cfg(has_native_signals)] diff --git a/crates/wasmtime/src/runtime/vm/sys/miri/traphandlers.rs b/crates/wasmtime/src/runtime/vm/sys/miri/traphandlers.rs index f95bbbd782b9..0f268474e964 100644 --- a/crates/wasmtime/src/runtime/vm/sys/miri/traphandlers.rs +++ b/crates/wasmtime/src/runtime/vm/sys/miri/traphandlers.rs @@ -11,12 +11,13 @@ use crate::prelude::*; use crate::runtime::vm::VMContext; +use core::ptr::NonNull; pub fn wasmtime_setjmp( _jmp_buf: *mut *const u8, - callback: extern "C" fn(*mut u8, *mut VMContext) -> bool, + callback: extern "C" fn(*mut u8, NonNull) -> bool, payload: *mut u8, - callee: *mut VMContext, + callee: NonNull, ) -> bool { callback(payload, callee) } diff --git a/crates/wasmtime/src/runtime/vm/sys/unix/traphandlers.rs b/crates/wasmtime/src/runtime/vm/sys/unix/traphandlers.rs index 667aad675425..f1221c54cc6e 100644 --- a/crates/wasmtime/src/runtime/vm/sys/unix/traphandlers.rs +++ b/crates/wasmtime/src/runtime/vm/sys/unix/traphandlers.rs @@ -1,4 +1,5 @@ use crate::vm::VMContext; +use core::ptr::NonNull; #[link(name = "wasmtime-helpers")] unsafe extern "C" { @@ -6,9 +7,9 @@ unsafe extern "C" { #[allow(improper_ctypes)] pub fn wasmtime_setjmp( jmp_buf: *mut *const u8, - callback: extern "C" fn(*mut u8, *mut VMContext) -> bool, + callback: extern "C" fn(*mut u8, NonNull) -> bool, payload: *mut u8, - callee: *mut VMContext, + callee: NonNull, ) -> bool; #[wasmtime_versioned_export_macros::versioned_link] diff --git a/crates/wasmtime/src/runtime/vm/sys/windows/traphandlers.rs b/crates/wasmtime/src/runtime/vm/sys/windows/traphandlers.rs index 68b4f6e4a594..585c4b9f072c 100644 --- a/crates/wasmtime/src/runtime/vm/sys/windows/traphandlers.rs +++ b/crates/wasmtime/src/runtime/vm/sys/windows/traphandlers.rs @@ -3,6 +3,7 @@ use crate::runtime::vm::traphandlers::{tls, TrapRegisters, TrapTest}; use crate::runtime::vm::VMContext; use std::ffi::c_void; use std::io; +use std::ptr::NonNull; use windows_sys::Win32::Foundation::*; use windows_sys::Win32::System::Diagnostics::Debug::*; use windows_sys::Win32::System::Kernel::*; @@ -13,9 +14,9 @@ unsafe extern "C" { #[allow(improper_ctypes)] pub fn wasmtime_setjmp( jmp_buf: *mut *const u8, - callback: extern "C" fn(*mut u8, *mut 
VMContext) -> bool, + callback: extern "C" fn(*mut u8, NonNull) -> bool, payload: *mut u8, - callee: *mut VMContext, + callee: NonNull, ) -> bool; #[wasmtime_versioned_export_macros::versioned_link] diff --git a/crates/wasmtime/src/runtime/vm/table.rs b/crates/wasmtime/src/runtime/vm/table.rs index f6c447b58b6b..fad477e26e96 100644 --- a/crates/wasmtime/src/runtime/vm/table.rs +++ b/crates/wasmtime/src/runtime/vm/table.rs @@ -740,25 +740,29 @@ impl Table { match self { Table::Static(StaticTable::Func(StaticFuncTable { data, size, .. })) => { VMTableDefinition { - base: data.as_ptr().cast(), + base: data.cast().into(), current_elements: *size, } } Table::Static(StaticTable::GcRef(StaticGcRefTable { data, size })) => { VMTableDefinition { - base: data.as_ptr().cast(), + base: data.cast().into(), current_elements: *size, } } Table::Dynamic(DynamicTable::Func(DynamicFuncTable { elements, .. })) => { VMTableDefinition { - base: elements.as_mut_ptr().cast(), + base: NonNull::<[FuncTableElem]>::from(&mut elements[..]) + .cast() + .into(), current_elements: elements.len(), } } Table::Dynamic(DynamicTable::GcRef(DynamicGcRefTable { elements, .. })) => { VMTableDefinition { - base: elements.as_mut_ptr().cast(), + base: NonNull::<[Option]>::from(&mut elements[..]) + .cast() + .into(), current_elements: elements.len(), } } diff --git a/crates/wasmtime/src/runtime/vm/traphandlers.rs b/crates/wasmtime/src/runtime/vm/traphandlers.rs index 94a080d60436..0f27ccb8eeed 100644 --- a/crates/wasmtime/src/runtime/vm/traphandlers.rs +++ b/crates/wasmtime/src/runtime/vm/traphandlers.rs @@ -354,7 +354,7 @@ pub unsafe fn catch_traps( mut closure: F, ) -> Result<(), Box> where - F: FnMut(*mut VMContext, Option>) -> bool, + F: FnMut(NonNull, Option>) -> bool, { let caller = store.0.default_caller(); let result = CallThreadState::new(store.0, caller).with(|cx| match store.0.interpreter() { @@ -377,9 +377,9 @@ where None => traphandlers::wasmtime_setjmp( cx.jmp_buf.as_ptr(), { - extern "C" fn call_closure(payload: *mut u8, caller: *mut VMContext) -> bool + extern "C" fn call_closure(payload: *mut u8, caller: NonNull) -> bool where - F: FnMut(*mut VMContext, Option>) -> bool, + F: FnMut(NonNull, Option>) -> bool, { unsafe { (*(payload as *mut F))(caller, None) } } @@ -420,7 +420,7 @@ mod call_thread_state { #[cfg(feature = "coredump")] pub(super) capture_coredump: bool, - pub(crate) limits: *const VMRuntimeLimits, + pub(crate) limits: NonNull, pub(crate) unwinder: &'static dyn Unwind, pub(super) prev: Cell, @@ -447,9 +447,10 @@ mod call_thread_state { debug_assert!(self.unwind.replace(None).is_none()); unsafe { - *(*self.limits).last_wasm_exit_fp.get() = self.old_last_wasm_exit_fp.get(); - *(*self.limits).last_wasm_exit_pc.get() = self.old_last_wasm_exit_pc.get(); - *(*self.limits).last_wasm_entry_fp.get() = self.old_last_wasm_entry_fp.get(); + let limits = self.limits.as_ref(); + *limits.last_wasm_exit_fp.get() = self.old_last_wasm_exit_fp.get(); + *limits.last_wasm_exit_pc.get() = self.old_last_wasm_exit_pc.get(); + *limits.last_wasm_entry_fp.get() = self.old_last_wasm_entry_fp.get(); } } } @@ -458,8 +459,13 @@ mod call_thread_state { pub const JMP_BUF_INTERPRETER_SENTINEL: *mut u8 = 1 as *mut u8; #[inline] - pub(super) fn new(store: &mut StoreOpaque, caller: *mut VMContext) -> CallThreadState { - let limits = unsafe { *Instance::from_vmctx(caller, |i| i.runtime_limits()) }; + pub(super) fn new(store: &mut StoreOpaque, caller: NonNull) -> CallThreadState { + let limits = unsafe { + Instance::from_vmctx(caller, |i| 
i.runtime_limits()) + .read() + .unwrap() + .as_non_null() + }; // Don't try to plumb #[cfg] everywhere for this field, just pretend // we're using it on miri/windows to silence compiler warnings. @@ -478,9 +484,15 @@ mod call_thread_state { #[cfg(all(has_native_signals, unix))] async_guard_range: store.async_guard_range(), prev: Cell::new(ptr::null()), - old_last_wasm_exit_fp: Cell::new(unsafe { *(*limits).last_wasm_exit_fp.get() }), - old_last_wasm_exit_pc: Cell::new(unsafe { *(*limits).last_wasm_exit_pc.get() }), - old_last_wasm_entry_fp: Cell::new(unsafe { *(*limits).last_wasm_entry_fp.get() }), + old_last_wasm_exit_fp: Cell::new(unsafe { + *limits.as_ref().last_wasm_exit_fp.get() + }), + old_last_wasm_exit_pc: Cell::new(unsafe { + *limits.as_ref().last_wasm_exit_pc.get() + }), + old_last_wasm_entry_fp: Cell::new(unsafe { + *limits.as_ref().last_wasm_entry_fp.get() + }), } } @@ -580,8 +592,8 @@ impl CallThreadState { (None, None) } UnwindReason::Trap(_) => ( - self.capture_backtrace(self.limits, None), - self.capture_coredump(self.limits, None), + self.capture_backtrace(self.limits.as_ptr(), None), + self.capture_coredump(self.limits.as_ptr(), None), ), }; self.unwind.set(Some((reason, backtrace, coredump))); @@ -696,8 +708,8 @@ impl CallThreadState { faulting_addr: Option, trap: wasmtime_environ::Trap, ) { - let backtrace = self.capture_backtrace(self.limits, Some((pc, fp))); - let coredump = self.capture_coredump(self.limits, Some((pc, fp))); + let backtrace = self.capture_backtrace(self.limits.as_ptr(), Some((pc, fp))); + let coredump = self.capture_coredump(self.limits.as_ptr(), Some((pc, fp))); self.unwind.set(Some(( UnwindReason::Trap(TrapReason::Jit { pc, diff --git a/crates/wasmtime/src/runtime/vm/traphandlers/backtrace.rs b/crates/wasmtime/src/runtime/vm/traphandlers/backtrace.rs index 717d0c558067..996fd6f22909 100644 --- a/crates/wasmtime/src/runtime/vm/traphandlers/backtrace.rs +++ b/crates/wasmtime/src/runtime/vm/traphandlers/backtrace.rs @@ -116,7 +116,7 @@ impl Backtrace { // trampoline did not get a chance to save the last Wasm PC and FP, // and we need to use the plumbed-through values instead. Some((pc, fp)) => { - assert!(core::ptr::eq(limits, state.limits)); + assert!(core::ptr::eq(limits, state.limits.as_ptr())); (pc, fp) } // Either there is no Wasm currently on the stack, or we exited Wasm @@ -136,7 +136,7 @@ impl Backtrace { .chain( state .iter() - .filter(|state| core::ptr::eq(limits, state.limits)) + .filter(|state| core::ptr::eq(limits, state.limits.as_ptr())) .map(|state| { ( state.old_last_wasm_exit_pc(), diff --git a/crates/wasmtime/src/runtime/vm/vmcontext.rs b/crates/wasmtime/src/runtime/vm/vmcontext.rs index 450cfde37a0b..0f1d5de54f71 100644 --- a/crates/wasmtime/src/runtime/vm/vmcontext.rs +++ b/crates/wasmtime/src/runtime/vm/vmcontext.rs @@ -5,7 +5,7 @@ mod vm_host_func_context; pub use self::vm_host_func_context::VMArrayCallHostFuncContext; use crate::prelude::*; -use crate::runtime::vm::{GcStore, InterpreterRef, VMGcRef}; +use crate::runtime::vm::{GcStore, InterpreterRef, VMGcRef, VmPtr, VmSafe}; use crate::store::StoreOpaque; use core::cell::UnsafeCell; use core::ffi::c_void; @@ -42,8 +42,12 @@ use wasmtime_environ::{ /// /// * `true` if this call succeeded. /// * `false` if this call failed and a trap was recorded in TLS. 
-pub type VMArrayCallNative = - unsafe extern "C" fn(*mut VMOpaqueContext, *mut VMOpaqueContext, *mut ValRaw, usize) -> bool; +pub type VMArrayCallNative = unsafe extern "C" fn( + NonNull, + NonNull, + NonNull, + usize, +) -> bool; /// An opaque function pointer which might be `VMArrayCallNative` or it might be /// pulley bytecode. Requires external knowledge to determine what kind of @@ -67,11 +71,11 @@ pub struct VMWasmCallFunction(VMFunctionBody); #[repr(C)] pub struct VMFunctionImport { /// Function pointer to use when calling this imported function from Wasm. - pub wasm_call: NonNull, + pub wasm_call: VmPtr, /// Function pointer to use when calling this imported function with the /// "array" calling convention that `Func::new` et al use. - pub array_call: NonNull, + pub array_call: VmPtr, /// The VM state associated with this function. /// @@ -79,13 +83,11 @@ pub struct VMFunctionImport { /// VMContext`, but for lifted/lowered component model functions this will /// be a `VMComponentContext`, and for a host function it will be a /// `VMHostFuncContext`, etc. - pub vmctx: *mut VMOpaqueContext, + pub vmctx: VmPtr, } -// Declare that this type is send/sync, it's the responsibility of users of -// `VMFunctionImport` to uphold this guarantee. -unsafe impl Send for VMFunctionImport {} -unsafe impl Sync for VMFunctionImport {} +// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields. +unsafe impl VmSafe for VMFunctionImport {} #[cfg(test)] mod test_vmfunction_import { @@ -124,6 +126,9 @@ mod test_vmfunction_import { #[repr(C)] pub struct VMFunctionBody(u8); +// SAFETY: this structure is never read and is safe to pass to jit code. +unsafe impl VmSafe for VMFunctionBody {} + #[cfg(test)] mod test_vmfunction_body { use super::VMFunctionBody; @@ -141,16 +146,14 @@ mod test_vmfunction_body { #[repr(C)] pub struct VMTableImport { /// A pointer to the imported table description. - pub from: *mut VMTableDefinition, + pub from: VmPtr, /// A pointer to the `VMContext` that owns the table description. - pub vmctx: *mut VMContext, + pub vmctx: VmPtr, } -// Declare that this type is send/sync, it's the responsibility of users of -// `VMTableImport` to uphold this guarantee. -unsafe impl Send for VMTableImport {} -unsafe impl Sync for VMTableImport {} +// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields. +unsafe impl VmSafe for VMTableImport {} #[cfg(test)] mod test_vmtable_import { @@ -184,19 +187,17 @@ mod test_vmtable_import { #[repr(C)] pub struct VMMemoryImport { /// A pointer to the imported memory description. - pub from: *mut VMMemoryDefinition, + pub from: VmPtr, /// A pointer to the `VMContext` that owns the memory description. - pub vmctx: *mut VMContext, + pub vmctx: VmPtr, /// The index of the memory in the containing `vmctx`. pub index: DefinedMemoryIndex, } -// Declare that this type is send/sync, it's the responsibility of users of -// `VMMemoryImport` to uphold this guarantee. -unsafe impl Send for VMMemoryImport {} -unsafe impl Sync for VMMemoryImport {} +// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields. +unsafe impl VmSafe for VMMemoryImport {} #[cfg(test)] mod test_vmmemory_import { @@ -234,13 +235,11 @@ mod test_vmmemory_import { #[repr(C)] pub struct VMGlobalImport { /// A pointer to the imported global variable description. - pub from: *mut VMGlobalDefinition, + pub from: VmPtr, } -// Declare that this type is send/sync, it's the responsibility of users of -// `VMGlobalImport` to uphold this guarantee. 
-unsafe impl Send for VMGlobalImport {} -unsafe impl Sync for VMGlobalImport {} +// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields. +unsafe impl VmSafe for VMGlobalImport {} #[cfg(test)] mod test_vmglobal_import { @@ -271,7 +270,7 @@ mod test_vmglobal_import { #[repr(C)] pub struct VMMemoryDefinition { /// The start address. - pub base: *mut u8, + pub base: VmPtr, /// The current logical size of this linear memory in bytes. /// @@ -281,6 +280,10 @@ pub struct VMMemoryDefinition { pub current_length: AtomicUsize, } +// SAFETY: the above definition has `repr(C)` and each field individually +// implements `VmSafe`, which satisfies the requirements of this trait. +unsafe impl VmSafe for VMMemoryDefinition {} + impl VMMemoryDefinition { /// Return the current length (in bytes) of the [`VMMemoryDefinition`] by /// performing a relaxed load; do not use this function for situations in @@ -343,12 +346,15 @@ mod test_vmmemory_definition { #[repr(C)] pub struct VMTableDefinition { /// Pointer to the table data. - pub base: *mut u8, + pub base: VmPtr, /// The current number of elements in the table. pub current_elements: usize, } +// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields. +unsafe impl VmSafe for VMTableDefinition {} + #[cfg(test)] mod test_vmtable_definition { use super::VMTableDefinition; @@ -386,6 +392,9 @@ pub struct VMGlobalDefinition { // If more elements are added here, remember to add offset_of tests below! } +// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields. +unsafe impl VmSafe for VMGlobalDefinition {} + #[cfg(test)] mod test_vmglobal_definition { use super::VMGlobalDefinition; @@ -669,7 +678,7 @@ mod test_vmshared_type_index { pub struct VMFuncRef { /// Function pointer for this funcref if being called via the "array" /// calling convention that `Func::new` et al use. - pub array_call: NonNull, + pub array_call: VmPtr, /// Function pointer for this funcref if being called via the calling /// convention we use when compiling Wasm. @@ -689,7 +698,7 @@ pub struct VMFuncRef { /// it means that the Wasm cannot actually call this function. But it does /// mean that this field needs to be an `Option` even though it is non-null /// the vast vast vast majority of the time. - pub wasm_call: Option>, + pub wasm_call: Option>, /// Function signature's type id. pub type_index: VMSharedTypeIndex, @@ -700,12 +709,12 @@ pub struct VMFuncRef { /// function being referenced: for core Wasm functions, this is a `*mut /// VMContext`, for host functions it is a `*mut VMHostFuncContext`, and for /// component functions it is a `*mut VMComponentContext`. - pub vmctx: *mut VMOpaqueContext, + pub vmctx: VmPtr, // If more elements are added here, remember to add offset_of tests below! } -unsafe impl Send for VMFuncRef {} -unsafe impl Sync for VMFuncRef {} +// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields. 
+unsafe impl VmSafe for VMFuncRef {} impl VMFuncRef { /// Invokes the `array_call` field of this `VMFuncRef` with the supplied @@ -736,8 +745,8 @@ impl VMFuncRef { pub unsafe fn array_call( &self, pulley: Option>, - caller: *mut VMOpaqueContext, - args_and_results: *mut [ValRaw], + caller: NonNull, + args_and_results: NonNull<[ValRaw]>, ) -> bool { match pulley { Some(vm) => self.array_call_interpreted(vm, caller, args_and_results), @@ -748,33 +757,40 @@ impl VMFuncRef { unsafe fn array_call_interpreted( &self, vm: InterpreterRef<'_>, - caller: *mut VMOpaqueContext, - args_and_results: *mut [ValRaw], + caller: NonNull, + args_and_results: NonNull<[ValRaw]>, ) -> bool { // If `caller` is actually a `VMArrayCallHostFuncContext` then skip the // interpreter, even though it's available, as `array_call` will be // native code. - if (*self.vmctx).magic == wasmtime_environ::VM_ARRAY_CALL_HOST_FUNC_MAGIC { + if self.vmctx.as_non_null().as_ref().magic + == wasmtime_environ::VM_ARRAY_CALL_HOST_FUNC_MAGIC + { return self.array_call_native(caller, args_and_results); } - vm.call(self.array_call.cast(), self.vmctx, caller, args_and_results) + vm.call( + self.array_call.as_non_null().cast(), + self.vmctx.as_non_null(), + caller, + args_and_results, + ) } unsafe fn array_call_native( &self, - caller: *mut VMOpaqueContext, - args_and_results: *mut [ValRaw], + caller: NonNull, + args_and_results: NonNull<[ValRaw]>, ) -> bool { union GetNativePointer { native: VMArrayCallNative, ptr: NonNull, } let native = GetNativePointer { - ptr: self.array_call, + ptr: self.array_call.as_non_null(), } .native; native( - self.vmctx, + self.vmctx.as_non_null(), caller, args_and_results.cast(), args_and_results.len(), @@ -849,9 +865,12 @@ macro_rules! define_builtin_array { (@ty u8) => (u8); (@ty bool) => (bool); (@ty pointer) => (*mut u8); - (@ty vmctx) => (*mut VMContext); + (@ty vmctx) => (VmPtr); } +// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields. +unsafe impl VmSafe for VMBuiltinFunctionsArray {} + wasmtime_environ::foreach_builtin_function!(define_builtin_array); const _: () = { @@ -939,6 +958,9 @@ pub struct VMRuntimeLimits { unsafe impl Send for VMRuntimeLimits {} unsafe impl Sync for VMRuntimeLimits {} +// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields. +unsafe impl VmSafe for VMRuntimeLimits {} + impl Default for VMRuntimeLimits { fn default() -> VMRuntimeLimits { VMRuntimeLimits { @@ -1014,7 +1036,7 @@ impl VMContext { /// Helper function to cast between context types using a debug assertion to /// protect against some mistakes. #[inline] - pub unsafe fn from_opaque(opaque: *mut VMOpaqueContext) -> *mut VMContext { + pub unsafe fn from_opaque(opaque: NonNull) -> NonNull { // Note that in general the offset of the "magic" field is stored in // `VMOffsets::vmctx_magic`. Given though that this is a sanity check // about converting this pointer to another type we ideally don't want @@ -1030,7 +1052,7 @@ impl VMContext { // bugs, meaning we don't actually read the magic and act differently // at runtime depending what it is, so this is a debug assertion as // opposed to a regular assertion. - debug_assert_eq!((*opaque).magic, VMCONTEXT_MAGIC); + debug_assert_eq!(opaque.as_ref().magic, VMCONTEXT_MAGIC); opaque.cast() } } @@ -1350,15 +1372,15 @@ pub struct VMOpaqueContext { impl VMOpaqueContext { /// Helper function to clearly indicate that casts are desired. 
#[inline] - pub fn from_vmcontext(ptr: *mut VMContext) -> *mut VMOpaqueContext { + pub fn from_vmcontext(ptr: NonNull) -> NonNull { ptr.cast() } /// Helper function to clearly indicate that casts are desired. #[inline] pub fn from_vm_array_call_host_func_context( - ptr: *mut VMArrayCallHostFuncContext, - ) -> *mut VMOpaqueContext { + ptr: NonNull, + ) -> NonNull { ptr.cast() } } diff --git a/crates/wasmtime/src/runtime/vm/vmcontext/vm_host_func_context.rs b/crates/wasmtime/src/runtime/vm/vmcontext/vm_host_func_context.rs index 3976be30bb92..f443970ab3da 100644 --- a/crates/wasmtime/src/runtime/vm/vmcontext/vm_host_func_context.rs +++ b/crates/wasmtime/src/runtime/vm/vmcontext/vm_host_func_context.rs @@ -6,7 +6,7 @@ use super::{VMArrayCallNative, VMOpaqueContext}; use crate::prelude::*; use crate::runtime::vm::{StoreBox, VMFuncRef}; use core::any::Any; -use core::ptr::{self, NonNull}; +use core::ptr::NonNull; use wasmtime_environ::{VMSharedTypeIndex, VM_ARRAY_CALL_HOST_FUNC_MAGIC}; /// The `VM*Context` for array-call host functions. @@ -38,16 +38,16 @@ impl VMArrayCallHostFuncContext { let ctx = StoreBox::new(VMArrayCallHostFuncContext { magic: wasmtime_environ::VM_ARRAY_CALL_HOST_FUNC_MAGIC, func_ref: VMFuncRef { - array_call: NonNull::new(host_func as *mut u8).unwrap().cast(), + array_call: NonNull::new(host_func as *mut u8).unwrap().cast().into(), type_index, wasm_call: None, - vmctx: ptr::null_mut(), + vmctx: NonNull::dangling().into(), }, host_state, }); let vmctx = VMOpaqueContext::from_vm_array_call_host_func_context(ctx.get()); unsafe { - (*ctx.get()).func_ref.vmctx = vmctx; + ctx.get().as_mut().func_ref.vmctx = vmctx.into(); } ctx } @@ -67,9 +67,11 @@ impl VMArrayCallHostFuncContext { /// Helper function to cast between context types using a debug assertion to /// protect against some mistakes. #[inline] - pub unsafe fn from_opaque(opaque: *mut VMOpaqueContext) -> *mut VMArrayCallHostFuncContext { + pub unsafe fn from_opaque( + opaque: NonNull, + ) -> NonNull { // See comments in `VMContext::from_opaque` for this debug assert - debug_assert_eq!((*opaque).magic, VM_ARRAY_CALL_HOST_FUNC_MAGIC); + debug_assert_eq!(opaque.as_ref().magic, VM_ARRAY_CALL_HOST_FUNC_MAGIC); opaque.cast() } }
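The `from_opaque` helpers above all share one pattern: debug-check a leading `magic` field, then reinterpret the pointer at the same address. A minimal sketch of that pattern with invented stand-in types (not the real `VMContext`/`VMOpaqueContext`, and a made-up magic constant):

    use core::ptr::NonNull;

    const DEMO_MAGIC: u32 = 0x1234_5678; // stand-in for VMCONTEXT_MAGIC et al.

    #[repr(C)]
    struct Opaque {
        magic: u32,
    }

    #[repr(C)]
    struct Concrete {
        magic: u32,
        payload: u64,
    }

    // Same shape as `VMContext::from_opaque` above: the magic is only consulted
    // as a sanity check in debug builds, never to change behavior at runtime.
    unsafe fn from_opaque(opaque: NonNull<Opaque>) -> NonNull<Concrete> {
        debug_assert_eq!(unsafe { opaque.as_ref() }.magic, DEMO_MAGIC);
        opaque.cast()
    }

    fn main() {
        let mut c = Concrete { magic: DEMO_MAGIC, payload: 7 };
        let opaque = NonNull::from(&mut c).cast::<Opaque>();
        let back = unsafe { from_opaque(opaque) };
        assert_eq!(unsafe { back.as_ref() }.payload, 7);
    }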