From 9d6bf22ffd77a85a465ba427176175ec3e081887 Mon Sep 17 00:00:00 2001 From: Andrew Brown Date: Wed, 25 Oct 2023 13:50:17 -0700 Subject: [PATCH] mpk: limit the number of protection keys (#7364) * mpk: add `max_memory_protection_keys` If Wasmtime is ever embedded in an application that also uses memory protection keys, it could be useful to limit how many Wasmtime allocates and uses. This came up while examining `*.wast` tests: if there was no way of limiting the number of keys used, then those tests configured a pool that reserved too much memory. This change takes that further to attempt to limit the initial number of keys allocated. The unfortunate side effect of using a `OnceLock` is that the `max` setting is only applicable on the first invocation, the one that sets the `OnceLock`. * mpk: use two protection keys for WAST tests This change stems from how slicing memory slots into MPK-protected regions limits the number of memories each store can access: e.g., with fifteen keys in use, a store only has access to a fifteenth of the available slots. If we simply multiply the number of memory slots needed to run the `*.wast` spec tests by fifteen, we run out of available memory. This limits the number of protection keys used to two, which still allows us to test the functionality without reserving too much memory. * mpk: ensure `keys` only ever returns `max` items This addresses a review comment to slice the list of keys down to the `max` hint regardless of how many are allocated in the first invocation. 
* fix: remove warning about unused parameter --- .../runtime/src/instance/allocator/pooling.rs | 3 ++ .../instance/allocator/pooling/memory_pool.rs | 4 +- crates/runtime/src/mpk/disabled.rs | 2 +- crates/runtime/src/mpk/enabled.rs | 39 +++++++++++-------- crates/wasmtime/src/config.rs | 17 ++++++++ tests/all/wast.rs | 12 +++--- 6 files changed, 52 insertions(+), 25 deletions(-) diff --git a/crates/runtime/src/instance/allocator/pooling.rs b/crates/runtime/src/instance/allocator/pooling.rs index 7d00a59052bb..e0584726fa67 100644 --- a/crates/runtime/src/instance/allocator/pooling.rs +++ b/crates/runtime/src/instance/allocator/pooling.rs @@ -219,6 +219,8 @@ pub struct PoolingInstanceAllocatorConfig { pub table_keep_resident: usize, /// Whether to enable memory protection keys. pub memory_protection_keys: MpkEnabled, + /// How many memory protection keys to allocate. + pub max_memory_protection_keys: usize, } impl Default for PoolingInstanceAllocatorConfig { @@ -232,6 +234,7 @@ impl Default for PoolingInstanceAllocatorConfig { linear_memory_keep_resident: 0, table_keep_resident: 0, memory_protection_keys: MpkEnabled::Disable, + max_memory_protection_keys: 16, } } } diff --git a/crates/runtime/src/instance/allocator/pooling/memory_pool.rs b/crates/runtime/src/instance/allocator/pooling/memory_pool.rs index 47455a0e3558..eda1f4b52a2f 100644 --- a/crates/runtime/src/instance/allocator/pooling/memory_pool.rs +++ b/crates/runtime/src/instance/allocator/pooling/memory_pool.rs @@ -156,14 +156,14 @@ impl MemoryPool { let pkeys = match config.memory_protection_keys { MpkEnabled::Auto => { if mpk::is_supported() { - mpk::keys() + mpk::keys(config.max_memory_protection_keys) } else { &[] } } MpkEnabled::Enable => { if mpk::is_supported() { - mpk::keys() + mpk::keys(config.max_memory_protection_keys) } else { bail!("mpk is disabled on this system") } diff --git a/crates/runtime/src/mpk/disabled.rs b/crates/runtime/src/mpk/disabled.rs index bbeccc80e893..7f6de55bc3ea 100644 --- 
a/crates/runtime/src/mpk/disabled.rs +++ b/crates/runtime/src/mpk/disabled.rs @@ -8,7 +8,7 @@ use anyhow::Result; pub fn is_supported() -> bool { false } -pub fn keys() -> &'static [ProtectionKey] { +pub fn keys(_: usize) -> &'static [ProtectionKey] { &[] } pub fn allow(_: ProtectionMask) {} diff --git a/crates/runtime/src/mpk/enabled.rs b/crates/runtime/src/mpk/enabled.rs index fe1bdf858223..f8a045d3a814 100644 --- a/crates/runtime/src/mpk/enabled.rs +++ b/crates/runtime/src/mpk/enabled.rs @@ -9,34 +9,39 @@ pub fn is_supported() -> bool { cfg!(target_os = "linux") && cfg!(target_arch = "x86_64") && pkru::has_cpuid_bit_set() } -/// Allocate all protection keys available to this process. +/// Allocate up to `max` protection keys. /// -/// This asks the kernel for all available keys (we expect 1-15; 0 is -/// kernel-reserved) in a thread-safe way. This avoids interference when +/// This asks the kernel for all available keys up to `max` in a thread-safe way +/// (we can expect 1-15; 0 is kernel-reserved). This avoids interference when /// multiple threads try to allocate keys at the same time (e.g., during -/// testing). It also ensures that a single copy of the keys are reserved for -/// the lifetime of the process. +/// testing). It also ensures that a single copy of the keys is reserved for the +/// lifetime of the process. Because of this, `max` is only a hint to +/// allocation: it only is effective on the first invocation of this function. /// /// TODO: this is not the best-possible design. This creates global state that /// would prevent any other code in the process from using protection keys; the /// `KEYS` are never deallocated from the system with `pkey_dealloc`. 
-pub fn keys() -> &'static [ProtectionKey] { +pub fn keys(max: usize) -> &'static [ProtectionKey] { let keys = KEYS.get_or_init(|| { let mut allocated = vec![]; if is_supported() { - while let Ok(key_id) = sys::pkey_alloc(0, 0) { - debug_assert!(key_id < 16); - // UNSAFETY: here we unsafely assume that the system-allocated - // pkey will exist forever. - allocated.push(ProtectionKey { - id: key_id, - stripe: allocated.len().try_into().unwrap(), - }); + while allocated.len() < max { + if let Ok(key_id) = sys::pkey_alloc(0, 0) { + debug_assert!(key_id < 16); + // UNSAFETY: here we unsafely assume that the + // system-allocated pkey will exist forever. + allocated.push(ProtectionKey { + id: key_id, + stripe: allocated.len().try_into().unwrap(), + }); + } else { + break; + } } } allocated }); - &keys + &keys[..keys.len().min(max)] } static KEYS: OnceLock<Vec<ProtectionKey>> = OnceLock::new(); @@ -152,14 +157,14 @@ mod tests { #[test] fn check_initialized_keys() { if is_supported() { - assert!(!keys().is_empty()) + assert!(!keys(15).is_empty()) } } #[test] fn check_invalid_mark() { skip_if_mpk_unavailable!(); - let pkey = keys()[0]; + let pkey = keys(15)[0]; let unaligned_region = unsafe { let addr = 1 as *mut u8; // this is not page-aligned! let len = 1; diff --git a/crates/wasmtime/src/config.rs b/crates/wasmtime/src/config.rs index 3f5cb799b9f3..9f6e634cbdc2 100644 --- a/crates/wasmtime/src/config.rs +++ b/crates/wasmtime/src/config.rs @@ -2386,6 +2386,23 @@ impl PoolingAllocationConfig { self } + /// Sets an upper limit on how many memory protection keys (MPK) Wasmtime + /// will use. + /// + /// This setting is only applicable when + /// [`PoolingAllocationConfig::memory_protection_keys`] is set to `enable` + /// or `auto`. Configuring this above the HW and OS limits (typically 15) + /// has no effect. 
+ /// + /// If multiple Wasmtime engines are used in the same process, note that all + /// engines will share the same set of allocated keys; this setting will + /// limit how many keys are allocated initially and thus available to all + /// other engines. + pub fn max_memory_protection_keys(&mut self, max: usize) -> &mut Self { + self.config.max_memory_protection_keys = max; + self + } + /// Check if memory protection keys (MPK) are available on the current host. /// /// This is a convenience method for determining MPK availability using the diff --git a/tests/all/wast.rs b/tests/all/wast.rs index 8f2f357be67d..5b93799aba9e 100644 --- a/tests/all/wast.rs +++ b/tests/all/wast.rs @@ -123,11 +123,13 @@ fn run_wast(wast: &str, strategy: Strategy, pooling: bool) -> anyhow::Result<()> } // The limits here are crafted such that the wast tests should pass. - // However, these limits may become insufficient in the future as the wast tests change. - // If a wast test fails because of a limit being "exceeded" or if memory/table - // fails to grow, the values here will need to be adjusted. + // However, these limits may become insufficient in the future as the + // wast tests change. If a wast test fails because of a limit being + // "exceeded" or if memory/table fails to grow, the values here will + // need to be adjusted. let mut pool = PoolingAllocationConfig::default(); - pool.total_memories(450) + pool.total_memories(450 * 2) + .max_memory_protection_keys(2) .memory_pages(805) .max_memories_per_module(if multi_memory { 9 } else { 1 }) .max_tables_per_module(4); @@ -180,7 +182,7 @@ fn feature_found_src(bytes: &[u8], name: &str) -> bool { // specified maximum we can put a cap on the virtual address space reservations // made. fn lock_pooling() -> impl Drop { - const MAX_CONCURRENT_POOLING: u32 = 8; + const MAX_CONCURRENT_POOLING: u32 = 4; static ACTIVE: Lazy<MyState> = Lazy::new(MyState::default);