From fc0c43ef7b6d6f4dcddeacb7207d5f421437bfb4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Mi=C5=82osz=20Rachwa=C5=82?=
Date: Tue, 26 Mar 2024 18:28:00 +0100
Subject: [PATCH] Module creation from premapped images

---
 crates/c-api/include/wasmtime/module.h        |  16 +
 crates/c-api/src/module.rs                    |  27 ++
 crates/wasmtime/src/compile/code_builder.rs   |   6 +-
 crates/wasmtime/src/engine.rs                 |  24 +-
 crates/wasmtime/src/runtime/code_memory.rs    | 416 ++++++++++++------
 .../src/runtime/component/component.rs        |  25 +-
 crates/wasmtime/src/runtime/instantiate.rs    |  17 +-
 crates/wasmtime/src/runtime/module.rs         |  24 +-
 .../wasmtime/src/runtime/trampoline/func.rs   |   4 +-
 9 files changed, 409 insertions(+), 150 deletions(-)

diff --git a/crates/c-api/include/wasmtime/module.h b/crates/c-api/include/wasmtime/module.h
index 0460798ede30..57c76cf8e988 100644
--- a/crates/c-api/include/wasmtime/module.h
+++ b/crates/c-api/include/wasmtime/module.h
@@ -133,6 +133,22 @@ WASM_API_EXTERN wasmtime_error_t *
 wasmtime_module_deserialize_file(wasm_engine_t *engine, const char *path,
                                  wasmtime_module_t **ret);
 
+/**
+ * \brief Create a module from an already-mapped memory image.
+ *
+ * This function creates a module whose compiled image is already mapped with
+ * the proper permission flags in the host address space.
+ *
+ * If `finalizer` is provided, it will be called with the `data` argument when
+ * the module is destroyed.
+ *
+ * This function does not take ownership of any of its arguments, but the
+ * returned error and module are owned by the caller.
+ */
+WASM_API_EXTERN wasmtime_error_t *wasmtime_module_from_premapped_image(
+    wasm_engine_t *engine, const uint8_t *image, size_t size, void *data,
+    void (*finalizer)(void *), wasmtime_module_t **ret);
+
 /**
  * \brief Returns the range of bytes in memory where this module’s compilation
  * image resides.
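[Editor's note: the sketch below is not part of the patch. It shows how an embedder might call the new C entry point on a POSIX system, assuming a `module.cwasm` produced for this exact host (e.g. by `wasmtime compile`) that contains no libcall relocations, since the premapped path never rewrites the image or changes page protections. The file name and the `mapping_t` helper are illustrative only, and error handling is elided.]

```c
#include <fcntl.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>
#include <wasmtime.h>

/* Bundles the mapping so the finalizer knows what to unmap. */
typedef struct {
  void *ptr;
  size_t len;
} mapping_t;

/* Invoked with `data` when the module is destroyed. */
static void unmap_image(void *data) {
  mapping_t *m = data;
  munmap(m->ptr, m->len);
  free(m);
}

int main(void) {
  wasm_engine_t *engine = wasm_engine_new();

  int fd = open("module.cwasm", O_RDONLY);
  struct stat st;
  fstat(fd, &st);

  /* The premapped path does not change protections, so the image must
     already be mapped with the text section executable. */
  void *ptr = mmap(NULL, st.st_size, PROT_READ | PROT_EXEC, MAP_PRIVATE, fd, 0);
  close(fd);

  mapping_t *m = malloc(sizeof(*m));
  m->ptr = ptr;
  m->len = st.st_size;

  wasmtime_module_t *module = NULL;
  wasmtime_error_t *error = wasmtime_module_from_premapped_image(
      engine, ptr, st.st_size, m, unmap_image, &module);
  if (error != NULL) {
    /* On failure wasmtime still drops the finalizer closure, so the
       mapping is released through unmap_image as well. */
    wasmtime_error_delete(error);
    wasm_engine_delete(engine);
    return 1;
  }

  wasmtime_module_delete(module); /* last owner drop runs unmap_image(m) */
  wasm_engine_delete(engine);
  return 0;
}
```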
diff --git a/crates/c-api/src/module.rs b/crates/c-api/src/module.rs
index 970e7ba27ca6..e2a3913dd5f2 100644
--- a/crates/c-api/src/module.rs
+++ b/crates/c-api/src/module.rs
@@ -3,7 +3,9 @@ use crate::{
     wasm_importtype_t, wasm_importtype_vec_t, wasm_store_t, wasmtime_error_t, CExternType,
 };
 use anyhow::Context;
+use std::ffi::c_void;
 use std::ffi::CStr;
+use std::ops::Range;
 use std::os::raw::c_char;
 use wasmtime::{Engine, Module};
 
@@ -222,3 +224,28 @@ pub unsafe extern "C" fn wasmtime_module_deserialize_file(
         *out = Box::into_raw(Box::new(wasmtime_module_t { module }));
     })
 }
+
+#[no_mangle]
+pub unsafe extern "C" fn wasmtime_module_from_premapped_image(
+    engine: &wasm_engine_t,
+    image: *const u8,
+    size: usize,
+    data: *mut c_void,
+    finalizer: Option<extern "C" fn(*mut c_void)>,
+    out: &mut *mut wasmtime_module_t,
+) -> Option<Box<wasmtime_error_t>> {
+    let foreign = crate::ForeignData { data, finalizer };
+    let result = Module::from_premapped_image(
+        &engine.engine,
+        Range {
+            start: image,
+            end: image.offset(size as isize),
+        },
+        move || {
+            let _ = &foreign;
+        },
+    );
+    handle_result(result, |module| {
+        *out = Box::into_raw(Box::new(wasmtime_module_t { module }));
+    })
+}
diff --git a/crates/wasmtime/src/compile/code_builder.rs b/crates/wasmtime/src/compile/code_builder.rs
index fb026d2a1ac3..84c25fe4cb70 100644
--- a/crates/wasmtime/src/compile/code_builder.rs
+++ b/crates/wasmtime/src/compile/code_builder.rs
@@ -177,7 +177,9 @@ impl<'a> CodeBuilder<'a> {
                 Ok((code, info))
             },
             // Implementation of how to serialize artifacts
-            |(_engine, _wasm, _), (code, _info_and_types)| Some(code.mmap().to_vec()),
+            |(_engine, _wasm, _), (code, _info_and_types)| {
+                Some(code.image_slice().to_vec())
+            },
             // Cache hit, deserialize the provided artifacts
             |(engine, _wasm, _), serialized_bytes| {
                 let code = engine
@@ -290,7 +292,7 @@ impl std::hash::Hash for HashedEngineCompileEnv<'_> {
 
 #[cfg(feature = "runtime")]
 fn publish_mmap(mmap: MmapVec) -> Result<Arc<CodeMemory>> {
-    let mut code = CodeMemory::new(mmap)?;
+    let mut code = CodeMemory::new_from_mmap(mmap)?;
     code.publish()?;
     Ok(Arc::new(code))
 }
diff --git a/crates/wasmtime/src/engine.rs b/crates/wasmtime/src/engine.rs
index 0e6f16cf2ab8..61f6809cdae2 100644
--- a/crates/wasmtime/src/engine.rs
+++ b/crates/wasmtime/src/engine.rs
@@ -5,6 +5,7 @@ use anyhow::{Context, Result};
 use object::write::{Object, StandardSegment};
 use object::SectionKind;
 use once_cell::sync::OnceCell;
+use std::ops::Range;
 use std::path::Path;
 use std::sync::atomic::{AtomicU64, Ordering};
 use std::sync::Arc;
@@ -703,13 +704,34 @@ impl Engine {
         )
     }
 
+    /// Like `load_code_bytes`, but uses an image already mapped in the host
+    /// address space.
+    pub(crate) unsafe fn load_code_premapped(
+        &self,
+        image_range: Range<*const u8>,
+        finalizer: impl FnOnce() + Send + Sync + 'static,
+        expected: ObjectKind,
+    ) -> Result<Arc<crate::CodeMemory>> {
+        serialization::check_compatible(
+            self,
+            std::slice::from_raw_parts(
+                image_range.start,
+                image_range.end.offset_from(image_range.start) as usize,
+            ),
+            expected,
+        )?;
+        let mut code = crate::CodeMemory::new_from_premapped_image(image_range, finalizer)?;
+        code.publish()?;
+        Ok(Arc::new(code))
+    }
+
     pub(crate) fn load_code(
         &self,
         mmap: wasmtime_runtime::MmapVec,
         expected: ObjectKind,
     ) -> Result<Arc<crate::CodeMemory>> {
         serialization::check_compatible(self, &mmap, expected)?;
-        let mut code = crate::CodeMemory::new(mmap)?;
+        let mut code = crate::CodeMemory::new_from_mmap(mmap)?;
         code.publish()?;
         Ok(Arc::new(code))
     }
diff --git a/crates/wasmtime/src/runtime/code_memory.rs b/crates/wasmtime/src/runtime/code_memory.rs
index 983a0c5be5bd..912a481f9e35 100644
--- a/crates/wasmtime/src/runtime/code_memory.rs
+++ b/crates/wasmtime/src/runtime/code_memory.rs
@@ -9,11 +9,52 @@ use wasmtime_environ::obj;
 use wasmtime_jit_icache_coherence as icache_coherence;
 use wasmtime_runtime::{libcalls, MmapVec, UnwindRegistration};
 
+trait CodeMemoryProvider {
+    fn mmap(&self) -> Option<&MmapVec>;
+    fn image_slice(&self) -> &[u8];
+    fn publish(&mut self) -> Result<()>;
+}
+
+/// `CodeMemory` provider that uses an already-mapped range in the host
+/// address space.
+struct CodeMemoryRangeProvider {
+    range: Range<*const u8>,
+    finalizer: Option<Box<dyn FnOnce() + Send + Sync>>,
+}
+
+unsafe impl Send for CodeMemoryRangeProvider {}
+unsafe impl Sync for CodeMemoryRangeProvider {}
+
+impl CodeMemoryProvider for CodeMemoryRangeProvider {
+    fn mmap(&self) -> Option<&MmapVec> {
+        None
+    }
+
+    fn image_slice(&self) -> &[u8] {
+        unsafe {
+            std::slice::from_raw_parts(
+                self.range.start,
+                self.range.end.offset_from(self.range.start) as usize,
+            )
+        }
+    }
+
+    fn publish(&mut self) -> Result<()> {
+        Ok(())
+    }
+}
+
+impl Drop for CodeMemoryRangeProvider {
+    fn drop(&mut self) {
+        self.finalizer.take().unwrap()();
+    }
+}
+
 /// Management of executable memory within a `MmapVec`
 ///
 /// This type consumes ownership of a region of memory and will manage the
 /// executable permissions of the contained JIT code as necessary.
-pub struct CodeMemory {
+struct CodeMemoryMmapProvider {
     // NB: these are `ManuallyDrop` because `unwind_registration` must be
     // dropped first since it refers to memory owned by `mmap`.
     mmap: ManuallyDrop<MmapVec>,
@@ -23,18 +64,128 @@
     relocations: Vec<(usize, obj::LibCall)>,
 
-    // Ranges within `self.mmap` of where the particular sections lie.
     text: Range<usize>,
     unwind: Range<usize>,
-    trap_data: Range<usize>,
-    wasm_data: Range<usize>,
-    address_map_data: Range<usize>,
-    func_name_data: Range<usize>,
-    info_data: Range<usize>,
-    dwarf: Range<usize>,
 }
 
-impl Drop for CodeMemory {
+impl CodeMemoryProvider for CodeMemoryMmapProvider {
+    fn mmap(&self) -> Option<&MmapVec> {
+        Some(&self.mmap)
+    }
+
+    fn image_slice(&self) -> &[u8] {
+        &self.mmap[..]
+    }
+
+    fn publish(&mut self) -> Result<()> {
+        assert!(!self.published);
+        self.published = true;
+
+        if self.text.is_empty() {
+            return Ok(());
+        }
+
+        // The unsafety here comes from a few things:
+        //
+        // * We're actually updating some page protections to executable
+        //   memory.
+        //
+        // * We're registering unwinding information which relies on the
+        //   correctness of the information in the first place. This applies
+        //   to both the actual unwinding tables as well as the validity of
+        //   the pointers we pass in themselves.
+        unsafe {
+            // First, if necessary, apply relocations. This can happen for
+            // things like libcalls which happen late in the lowering process
+            // that don't go through the Wasm-based libcalls layer that's
+            // indirected through the `VMContext`. Note that most modules won't
+            // have relocations, so this typically doesn't do anything.
+            self.apply_relocations()?;
+
+            // Next freeze the contents of this image by making all of the
+            // memory readonly. Nothing after this point should ever be modified
+            // so commit everything. For a compiled-in-memory image this will
+            // mean IPIs to evict writable mappings from other cores. For
+            // loaded-from-disk images this shouldn't result in IPIs so long as
+            // there weren't any relocations because nothing should have
+            // otherwise written to the image at any point either.
+            self.mmap.make_readonly(0..self.mmap.len())?;
+
+            let text = &self.mmap[self.text.clone()];
+
+            // Clear the newly allocated code from cache if the processor
+            // requires it.
+            //
+            // Do this before marking the memory as R+X; technically we should
+            // be able to do it after, but there are some CPUs that have had
+            // errata about doing this with read-only memory.
+            icache_coherence::clear_cache(text.as_ptr().cast(), text.len())
+                .expect("Failed cache clear");
+
+            // Switch the executable portion from readonly to read/execute.
+            self.mmap
+                .make_executable(self.text.clone(), self.enable_branch_protection)
+                .context("unable to make memory executable")?;
+
+            // Flush any in-flight instructions from the pipeline
+            icache_coherence::pipeline_flush_mt().expect("Failed pipeline flush");
+
+            // With all our memory set up use the platform-specific
+            // `UnwindRegistration` implementation to inform the general
+            // runtime that there's unwinding information available for all
+            // our just-published JIT functions.
+            self.register_unwind_info()?;
+        }
+
+        Ok(())
+    }
+}
+
+impl CodeMemoryMmapProvider {
+    unsafe fn apply_relocations(&mut self) -> Result<()> {
+        if self.relocations.is_empty() {
+            return Ok(());
+        }
+
+        for (offset, libcall) in self.relocations.iter() {
+            let offset = self.text.start + offset;
+            let libcall = match libcall {
+                obj::LibCall::FloorF32 => libcalls::relocs::floorf32 as usize,
+                obj::LibCall::FloorF64 => libcalls::relocs::floorf64 as usize,
+                obj::LibCall::NearestF32 => libcalls::relocs::nearestf32 as usize,
+                obj::LibCall::NearestF64 => libcalls::relocs::nearestf64 as usize,
+                obj::LibCall::CeilF32 => libcalls::relocs::ceilf32 as usize,
+                obj::LibCall::CeilF64 => libcalls::relocs::ceilf64 as usize,
+                obj::LibCall::TruncF32 => libcalls::relocs::truncf32 as usize,
+                obj::LibCall::TruncF64 => libcalls::relocs::truncf64 as usize,
+                obj::LibCall::FmaF32 => libcalls::relocs::fmaf32 as usize,
+                obj::LibCall::FmaF64 => libcalls::relocs::fmaf64 as usize,
+                #[cfg(target_arch = "x86_64")]
+                obj::LibCall::X86Pshufb => libcalls::relocs::x86_pshufb as usize,
+                #[cfg(not(target_arch = "x86_64"))]
+                obj::LibCall::X86Pshufb => unreachable!(),
+            };
+            self.mmap
+                .as_mut_ptr()
+                .add(offset)
+                .cast::<usize>()
+                .write_unaligned(libcall);
+        }
+        Ok(())
+    }
+
+    unsafe fn register_unwind_info(&mut self) -> Result<()> {
+        if self.unwind.len() == 0 {
+            return Ok(());
+        }
+        let text = &self.mmap[self.text.clone()];
+        let unwind_info = &self.mmap[self.unwind.clone()];
+        let registration =
+            UnwindRegistration::new(text.as_ptr(), unwind_info.as_ptr(), unwind_info.len())
+                .context("failed to create unwind info registration")?;
+        *self.unwind_registration = Some(registration);
+        Ok(())
+    }
+}
+
+impl Drop for CodeMemoryMmapProvider {
     fn drop(&mut self) {
         // Drop `unwind_registration` before `self.mmap`
         unsafe {
@@ -44,19 +195,41 @@
     }
 }
 
-fn _assert() {
-    fn _assert_send_sync<T: Send + Sync>() {}
-    _assert_send_sync::<CodeMemory>();
+/// Wrapper for executable memory.
+///
+/// It uses the `CodeMemoryProvider` trait to either wrap a managed mmap or
+/// use previously mapped host memory directly.
+pub struct CodeMemory {
+    provider: Box<dyn CodeMemoryProvider + Send + Sync>,
+
+    // Ranges within `provider.image_slice()` of where the particular
+    // sections lie.
+    text: Range<usize>,
+    trap_data: Range<usize>,
+    wasm_data: Range<usize>,
+    address_map_data: Range<usize>,
+    func_name_data: Range<usize>,
+    info_data: Range<usize>,
+    dwarf: Range<usize>,
+}
+
+struct ImageInfo {
+    enable_branch_protection: bool,
+    relocations: Vec<(usize, obj::LibCall)>,
+    text: Range<usize>,
+    unwind: Range<usize>,
+    trap_data: Range<usize>,
+    wasm_data: Range<usize>,
+    address_map_data: Range<usize>,
+    func_name_data: Range<usize>,
+    info_data: Range<usize>,
+    dwarf: Range<usize>,
 }
 
 impl CodeMemory {
-    /// Creates a new `CodeMemory` by taking ownership of the provided
-    /// `MmapVec`.
-    ///
-    /// The returned `CodeMemory` manages the internal `MmapVec` and the
-    /// `publish` method is used to actually make the memory executable.
-    pub fn new(mmap: MmapVec) -> Result<Self> {
-        let obj = File::parse(&mmap[..])
+    /// Parses a wasmtime ELF image passed as a memory slice into an
+    /// `ImageInfo` struct.
+    fn parse(image_slice: &[u8]) -> Result<ImageInfo> {
+        let obj = File::parse(image_slice)
             .with_context(|| "failed to parse internal compilation artifact")?;
 
         let mut relocations = Vec::new();
@@ -72,11 +245,11 @@
         for section in obj.sections() {
             let data = section.data()?;
             let name = section.name()?;
-            let range = subslice_range(data, &mmap);
+            let range = subslice_range(data, image_slice);
 
             // Double-check that sections are all aligned properly.
             if section.align() != 0 && data.len() != 0 {
-                if (data.as_ptr() as u64 - mmap.as_ptr() as u64) % section.align() != 0 {
+                if (data.as_ptr() as u64 - image_slice.as_ptr() as u64) % section.align() != 0 {
                     bail!(
                         "section `{}` isn't aligned to {:#x}",
                         section.name().unwrap_or("ERROR"),
@@ -127,12 +300,11 @@
                 _ => log::debug!("ignoring section {name}"),
             }
         }
-        Ok(Self {
-            mmap: ManuallyDrop::new(mmap),
-            unwind_registration: ManuallyDrop::new(None),
-            published: false,
+
+        Ok(ImageInfo {
             enable_branch_protection: enable_branch_protection
                 .ok_or_else(|| anyhow!("missing `{}` section", obj::ELF_WASM_BTI))?,
+            relocations,
             text,
             unwind,
             trap_data,
@@ -141,33 +313,104 @@
             dwarf,
             info_data,
             wasm_data,
-            relocations,
+        })
+    }
+
+    /// Creates a new `CodeMemory` from an already-mapped host address range.
+    ///
+    /// `finalizer` is called when the `CodeMemory` is destroyed.
+    ///
+    /// # Unsafety
+    ///
+    /// The caller must guarantee that `image_range` points to a valid
+    /// compilation image that stays mapped, with appropriate permissions,
+    /// for the lifetime of the returned `CodeMemory`.
+    pub unsafe fn new_from_premapped_image(
+        image_range: Range<*const u8>,
+        finalizer: impl FnOnce() + Send + Sync + 'static,
+    ) -> Result<Self> {
+        let image_slice = std::slice::from_raw_parts(
+            image_range.start,
+            image_range.end.offset_from(image_range.start) as usize,
+        );
+
+        let info = Self::parse(image_slice)?;
+
+        Ok(Self {
+            provider: Box::new(CodeMemoryRangeProvider {
+                range: image_range,
+                finalizer: Some(Box::new(finalizer)),
+            }),
+            text: info.text,
+            trap_data: info.trap_data,
+            address_map_data: info.address_map_data,
+            func_name_data: info.func_name_data,
+            dwarf: info.dwarf,
+            info_data: info.info_data,
+            wasm_data: info.wasm_data,
+        })
+    }
+
+    /// Creates a new `CodeMemory` by taking ownership of the provided
+    /// `MmapVec`.
+    ///
+    /// The returned `CodeMemory` manages the internal `MmapVec` and the
+    /// `publish` method is used to actually make the memory executable.
+    pub fn new_from_mmap(mmap: MmapVec) -> Result<Self> {
+        let info = Self::parse(&mmap[..])?;
+
+        Ok(Self {
+            provider: Box::new(CodeMemoryMmapProvider {
+                mmap: ManuallyDrop::new(mmap),
+                unwind_registration: ManuallyDrop::new(None),
+                published: false,
+                enable_branch_protection: info.enable_branch_protection,
+                relocations: info.relocations,
+                text: info.text.clone(),
+                unwind: info.unwind.clone(),
+            }),
+            text: info.text,
+            trap_data: info.trap_data,
+            address_map_data: info.address_map_data,
+            func_name_data: info.func_name_data,
+            dwarf: info.dwarf,
+            info_data: info.info_data,
+            wasm_data: info.wasm_data,
+        })
     }
 
-    /// Returns a reference to the underlying `MmapVec` this memory owns.
+    /// Returns a reference to the underlying `MmapVec`, if this memory is
+    /// backed by one it owns.
     #[inline]
-    pub fn mmap(&self) -> &MmapVec {
-        &self.mmap
+    pub fn mmap(&self) -> Option<&MmapVec> {
+        self.provider.mmap()
+    }
+
+    /// Returns the host memory range for the entire image.
+    #[inline]
+    pub fn image_range(&self) -> Range<*const u8> {
+        let slice = self.provider.image_slice();
+        let base = slice.as_ptr();
+        let len = slice.len();
+        base..base.wrapping_add(len)
+    }
+
+    /// Returns a slice covering the entire image.
+    #[inline]
+    pub fn image_slice(&self) -> &[u8] {
+        self.provider.image_slice()
    }
 
     /// Returns the contents of the text section of the ELF executable this
     /// represents.
     #[inline]
     pub fn text(&self) -> &[u8] {
-        &self.mmap[self.text.clone()]
+        &self.provider.image_slice()[self.text.clone()]
     }
 
     /// Returns the contents of the `ELF_WASMTIME_DWARF` section.
     #[inline]
     pub fn dwarf(&self) -> &[u8] {
-        &self.mmap[self.dwarf.clone()]
+        &self.provider.image_slice()[self.dwarf.clone()]
     }
 
     /// Returns the data in the `ELF_NAME_DATA` section.
     #[inline]
     pub fn func_name_data(&self) -> &[u8] {
-        &self.mmap[self.func_name_data.clone()]
+        &self.provider.image_slice()[self.func_name_data.clone()]
     }
 
     /// Returns the concatenated list of all data associated with this wasm
@@ -177,28 +420,28 @@
     /// in a `Module` are relative to the slice returned here.
     #[inline]
     pub fn wasm_data(&self) -> &[u8] {
-        &self.mmap[self.wasm_data.clone()]
+        &self.provider.image_slice()[self.wasm_data.clone()]
     }
 
     /// Returns the encoded address map section used to pass to
     /// `wasmtime_environ::lookup_file_pos`.
     #[inline]
     pub fn address_map_data(&self) -> &[u8] {
-        &self.mmap[self.address_map_data.clone()]
+        &self.provider.image_slice()[self.address_map_data.clone()]
     }
 
     /// Returns the contents of the `ELF_WASMTIME_INFO` section, or an empty
     /// slice if it wasn't found.
     #[inline]
     pub fn wasmtime_info(&self) -> &[u8] {
-        &self.mmap[self.info_data.clone()]
+        &self.provider.image_slice()[self.info_data.clone()]
     }
 
     /// Returns the contents of the `ELF_WASMTIME_TRAPS` section, or an empty
     /// slice if it wasn't found.
     #[inline]
     pub fn trap_data(&self) -> &[u8] {
-        &self.mmap[self.trap_data.clone()]
+        &self.provider.image_slice()[self.trap_data.clone()]
     }
 
     /// Publishes the internal ELF image to be ready for execution.
@@ -212,108 +455,7 @@
     ///
     /// After this function executes all JIT code should be ready to execute.
     pub fn publish(&mut self) -> Result<()> {
-        assert!(!self.published);
-        self.published = true;
-
-        if self.text().is_empty() {
-            return Ok(());
-        }
-
-        // The unsafety here comes from a few things:
-        //
-        // * We're actually updating some page protections to executable memory.
-        //
-        // * We're registering unwinding information which relies on the
-        //   correctness of the information in the first place. This applies to
-        //   both the actual unwinding tables as well as the validity of the
-        //   pointers we pass in itself.
-        unsafe {
-            // First, if necessary, apply relocations. This can happen for
-            // things like libcalls which happen late in the lowering process
-            // that don't go through the Wasm-based libcalls layer that's
-            // indirected through the `VMContext`. Note that most modules won't
-            // have relocations, so this typically doesn't do anything.
-            self.apply_relocations()?;
-
-            // Next freeze the contents of this image by making all of the
-            // memory readonly. Nothing after this point should ever be modified
-            // so commit everything. For a compiled-in-memory image this will
-            // mean IPIs to evict writable mappings from other cores. For
-            // loaded-from-disk images this shouldn't result in IPIs so long as
-            // there weren't any relocations because nothing should have
-            // otherwise written to the image at any point either.
-            self.mmap.make_readonly(0..self.mmap.len())?;
-
-            let text = self.text();
-
-            // Clear the newly allocated code from cache if the processor requires it
-            //
-            // Do this before marking the memory as R+X, technically we should be able to do it after
-            // but there are some CPU's that have had errata about doing this with read only memory.
-            icache_coherence::clear_cache(text.as_ptr().cast(), text.len())
-                .expect("Failed cache clear");
-
-            // Switch the executable portion from readonly to read/execute.
-            self.mmap
-                .make_executable(self.text.clone(), self.enable_branch_protection)
-                .context("unable to make memory executable")?;
-
-            // Flush any in-flight instructions from the pipeline
-            icache_coherence::pipeline_flush_mt().expect("Failed pipeline flush");
-
-            // With all our memory set up use the platform-specific
-            // `UnwindRegistration` implementation to inform the general
-            // runtime that there's unwinding information available for all
-            // our just-published JIT functions.
-            self.register_unwind_info()?;
-        }
-
-        Ok(())
-    }
-
-    unsafe fn apply_relocations(&mut self) -> Result<()> {
-        if self.relocations.is_empty() {
-            return Ok(());
-        }
-
-        for (offset, libcall) in self.relocations.iter() {
-            let offset = self.text.start + offset;
-            let libcall = match libcall {
-                obj::LibCall::FloorF32 => libcalls::relocs::floorf32 as usize,
-                obj::LibCall::FloorF64 => libcalls::relocs::floorf64 as usize,
-                obj::LibCall::NearestF32 => libcalls::relocs::nearestf32 as usize,
-                obj::LibCall::NearestF64 => libcalls::relocs::nearestf64 as usize,
-                obj::LibCall::CeilF32 => libcalls::relocs::ceilf32 as usize,
-                obj::LibCall::CeilF64 => libcalls::relocs::ceilf64 as usize,
-                obj::LibCall::TruncF32 => libcalls::relocs::truncf32 as usize,
-                obj::LibCall::TruncF64 => libcalls::relocs::truncf64 as usize,
-                obj::LibCall::FmaF32 => libcalls::relocs::fmaf32 as usize,
-                obj::LibCall::FmaF64 => libcalls::relocs::fmaf64 as usize,
-                #[cfg(target_arch = "x86_64")]
-                obj::LibCall::X86Pshufb => libcalls::relocs::x86_pshufb as usize,
-                #[cfg(not(target_arch = "x86_64"))]
-                obj::LibCall::X86Pshufb => unreachable!(),
-            };
-            self.mmap
-                .as_mut_ptr()
-                .add(offset)
-                .cast::<usize>()
-                .write_unaligned(libcall);
-        }
-        Ok(())
-    }
-
-    unsafe fn register_unwind_info(&mut self) -> Result<()> {
-        if self.unwind.len() == 0 {
-            return Ok(());
-        }
-        let text = self.text();
-        let unwind_info = &self.mmap[self.unwind.clone()];
-        let registration =
-            UnwindRegistration::new(text.as_ptr(), unwind_info.as_ptr(), unwind_info.len())
-                .context("failed to create unwind info registration")?;
-        *self.unwind_registration = Some(registration);
-        Ok(())
+        self.provider.publish()
     }
 }
diff --git a/crates/wasmtime/src/runtime/component/component.rs b/crates/wasmtime/src/runtime/component/component.rs
index 4e263ba3ffde..82071633f5c8 100644
--- a/crates/wasmtime/src/runtime/component/component.rs
+++ b/crates/wasmtime/src/runtime/component/component.rs
@@ -224,6 +224,27 @@ impl Component {
         Component::from_parts(engine, code, None)
     }
 
+    /// Same as [`Module::from_premapped_image`], but for components.
+    ///
+    /// Note that the images used here must contain contents previously
+    /// produced by [`Engine::precompile_component`] or
+    /// [`Component::serialize`].
+    ///
+    /// For more information see the [`Module::from_premapped_image`] method.
+    ///
+    /// # Unsafety
+    ///
+    /// The unsafety of this method is the same as that of the
+    /// [`Module::from_premapped_image`] method.
+    pub unsafe fn from_premapped_image(
+        engine: &Engine,
+        image_range: Range<*const u8>,
+        finalizer: impl FnOnce() + Send + Sync + 'static,
+    ) -> Result<Component> {
+        let code = engine.load_code_premapped(image_range, finalizer, ObjectKind::Component)?;
+        Component::from_parts(engine, code, None)
+    }
+
     /// Returns the type of this component as a [`types::Component`].
     ///
     /// This method enables runtime introspection of the type of a component
@@ -482,7 +503,7 @@ impl Component {
     /// [`Module::serialize`]: crate::Module::serialize
     /// [`Module`]: crate::Module
     pub fn serialize(&self) -> Result<Vec<u8>> {
-        Ok(self.code_object().code_memory().mmap().to_vec())
+        Ok(self.code_object().code_memory().image_slice().to_vec())
     }
 
     pub(crate) fn runtime_info(&self) -> Arc<ComponentRuntimeInfo> {
@@ -606,7 +627,7 @@ impl Component {
     /// For more information see
     /// [`Module::image_range`](crate::Module::image_range).
     pub fn image_range(&self) -> Range<*const u8> {
-        self.inner.code.code_memory().mmap().image_range()
+        self.inner.code.code_memory().image_range()
     }
 }
diff --git a/crates/wasmtime/src/runtime/instantiate.rs b/crates/wasmtime/src/runtime/instantiate.rs
index 165663a37c06..ff2185396cd1 100644
--- a/crates/wasmtime/src/runtime/instantiate.rs
+++ b/crates/wasmtime/src/runtime/instantiate.rs
@@ -6,6 +6,7 @@
 use crate::{code_memory::CodeMemory, profiling_agent::ProfilingAgent};
 use anyhow::{Error, Result};
 use object::write::WritableBuffer;
+use std::ops::Range;
 use std::str;
 use std::sync::Arc;
 use wasmtime_environ::{
@@ -75,14 +76,14 @@ impl CompiledModule {
             let text = self.text();
             let bytes = crate::debug::create_gdbjit_image(
-                self.mmap().to_vec(),
+                self.image_slice().to_vec(),
                 (text.as_ptr(), text.len()),
             )
             .context("failed to create jit image for gdb")?;
             let reg = wasmtime_runtime::GdbJitImageRegistration::register(bytes);
             self.dbg_jit_registration = Some(reg);
         }
-        profiler.register_module(&self.code_memory.mmap()[..], &|addr| {
+        profiler.register_module(self.image_slice(), &|addr| {
             let (idx, _) = self.func_by_text_offset(addr)?;
             let idx = self.module.func_index(idx);
             let name = self.func_name(idx)?;
@@ -101,10 +102,20 @@ impl CompiledModule {
 
     /// Returns the underlying memory which contains the compiled module's
     /// image.
-    pub fn mmap(&self) -> &MmapVec {
+    pub fn mmap(&self) -> Option<&MmapVec> {
         self.code_memory.mmap()
     }
 
+    /// Returns the host memory range of the compiled module's image.
+    pub fn image_range(&self) -> Range<*const u8> {
+        self.code_memory.image_range()
+    }
+
+    /// Returns the compiled module's image as a slice.
+    pub fn image_slice(&self) -> &[u8] {
+        self.code_memory.image_slice()
+    }
+
     /// Returns the underlying owned mmap of this compiled image.
     pub fn code_memory(&self) -> &Arc<CodeMemory> {
         &self.code_memory
diff --git a/crates/wasmtime/src/runtime/module.rs b/crates/wasmtime/src/runtime/module.rs
index 1e6c2ed38d60..79f163ca85cb 100644
--- a/crates/wasmtime/src/runtime/module.rs
+++ b/crates/wasmtime/src/runtime/module.rs
@@ -429,6 +429,24 @@ impl Module {
         Module::from_parts(engine, code, None)
     }
 
+    /// Similar to [`deserialize`], but uses a module image that is already
+    /// mapped in the host address space with the proper permission flags.
+    ///
+    /// `finalizer` will be called when the module is destroyed.
+    ///
+    /// # Unsafety
+    ///
+    /// In addition to all the reasons that [`deserialize`] is `unsafe`,
+    /// this also directly uses the host memory passed in `image_range`.
+    pub unsafe fn from_premapped_image(
+        engine: &Engine,
+        image_range: Range<*const u8>,
+        finalizer: impl FnOnce() + Send + Sync + 'static,
+    ) -> Result<Module> {
+        let code = engine.load_code_premapped(image_range, finalizer, ObjectKind::Module)?;
+        Module::from_parts(engine, code, None)
+    }
+
     /// Entrypoint for creating a `Module` for all above functions, both
     /// of the AOT and jit-compiled categories.
     ///
@@ -573,7 +591,7 @@ impl Module {
         if !self.inner.serializable {
             bail!("cannot serialize a module exported from a component");
         }
-        Ok(self.compiled_module().mmap().to_vec())
+        Ok(self.compiled_module().image_slice().to_vec())
     }
 
     pub(crate) fn compiled_module(&self) -> &CompiledModule {
@@ -922,7 +940,7 @@ impl Module {
     /// It is not safe to modify the memory in this range, nor is it safe to
     /// modify the protections of memory in this range.
     pub fn image_range(&self) -> Range<*const u8> {
-        self.compiled_module().mmap().image_range()
+        self.compiled_module().image_range()
     }
 
     /// Force initialization of copy-on-write images to happen here-and-now
     ///
@@ -1248,7 +1266,7 @@ fn memory_images(engine: &Engine, module: &CompiledModule) -> Result<Option<ModuleMemoryImages>>
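[Editor's note: a usage sketch for the new Rust entry point, not part of the patch. Mapping the image with the right protections is the embedder's responsibility, so the `map_cwasm` helper below is hypothetical and stands in for whatever platform-specific loader is in use; the example only shows how the mapped range and the unmap callback are handed to `Module::from_premapped_image`.]

```rust
use std::ops::Range;
use wasmtime::{Engine, Module};

// Hypothetical embedder-provided loader: maps a `.cwasm` produced by
// `wasmtime compile` for this host so that the text section is already
// executable (e.g. mmap with PROT_READ | PROT_EXEC), returning the mapped
// range and a callback that unmaps it.
fn map_cwasm(path: &str) -> (Range<*const u8>, Box<dyn FnOnce() + Send + Sync>) {
    unimplemented!("platform-specific mapping of {path}")
}

fn load(engine: &Engine) -> anyhow::Result<Module> {
    let (range, unmap) = map_cwasm("module.cwasm");
    // SAFETY: the range holds a valid compilation image for this engine's
    // configuration and stays mapped until `unmap` runs; wasmtime invokes
    // `unmap` once the module and everything sharing its code are dropped.
    unsafe { Module::from_premapped_image(engine, range, unmap) }
}
```

Because the premapped provider's `publish` is a no-op, nothing in this path applies relocations or flips page protections; the design choice is that validation (`check_compatible`, ELF parsing) still runs, while the mapping's lifecycle is delegated entirely to the finalizer.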