From 6ffb849a73b00410ed3954fcad4b9ccb190bdce1 Mon Sep 17 00:00:00 2001 From: Frank Emrich Date: Tue, 3 Sep 2024 16:02:48 +0100 Subject: [PATCH] Remove fibre::{Fiber, Suspend} types (#220) This PR performs a refactoring required by a follow-up PR that will implement actual pooling of `VMContRef`s. This PR removes the types `Fiber` and `Suspend` from the `fibre` crate (i.e., our version of `wasmtime-fiber`). These two types are effectively leftovers from `wasmtime-fiber`, but only add a layer of unnecessary indirection at this point. Note that the `fibre::FiberStack` type remains. Some functions originally implemented on `Fiber` have been moved to `FiberStack`. Further, the `VMContRef` definition used in the optimized implementation now owns a `fibre::FiberStack` directly, rather than storing a `Fiber` as a wrapper around the `FiberStack`. This PR does not affect the baseline implementation since it doesn't use the `fibre` crate at all. (A simplified sketch of the resulting ownership structure follows the diff below.) --- .github/workflows/main.yml | 12 ++- crates/continuations/src/lib.rs | 10 +- crates/cranelift/src/wasmfx/optimized.rs | 9 ++ .../wasmtime/src/runtime/vm/continuation.rs | 59 ++++++------ crates/wasmtime/src/runtime/vm/fibre/mod.rs | 74 +++------------ crates/wasmtime/src/runtime/vm/fibre/unix.rs | 91 ++++++++----------- .../src/runtime/vm/traphandlers/backtrace.rs | 2 +- 7 files changed, 108 insertions(+), 149 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 4ec774123472..0aa31c984bba 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -706,8 +706,18 @@ jobs: # Crude check for whether # `unsafe_disable_continuation_linearity_check` makes the test # `cont_twice` fail. + # We expect the test case to trigger an internal assertion failure (rather + than a proper Wasm trap), which turns into a SIGILL (4). + # However, we are happy as long as the test fails in some way, meaning that + it did not produce the correct Wasm trap (which it should only do if + unsafe_disable_continuation_linearity_check is disabled) - run: | - (cargo run --features=unsafe_disable_continuation_linearity_check -- wast -W=exceptions,function-references,typed-continuations tests/misc_testsuite/typed-continuations/cont_twice.wast && test $? -eq 101) || test $? -eq 101 + # By default, Github actions run commands with pipefail enabled. We don't want that, as the command below is supposed to fail: + set +e + # Let's keep the build step separate from the run step below, because the former should not fail: + cargo build --features=unsafe_disable_continuation_linearity_check + # We want this to fail and are happy with any non-zero exit code: + cargo run --features=unsafe_disable_continuation_linearity_check -- wast -W=exceptions,function-references,typed-continuations tests/misc_testsuite/typed-continuations/cont_twice.wast ; test $? -ne 0 # NB: the test job here is explicitly lacking in cancellation of this run if # something goes wrong.
These take the longest anyway and otherwise if diff --git a/crates/continuations/src/lib.rs b/crates/continuations/src/lib.rs index 4a2dffda7014..807a592ba9c7 100644 --- a/crates/continuations/src/lib.rs +++ b/crates/continuations/src/lib.rs @@ -180,10 +180,10 @@ pub mod offsets { pub const LIMITS: usize = 0; /// Offset of `parent_chain` field pub const PARENT_CHAIN: usize = LIMITS + 4 * core::mem::size_of::(); - /// Offset of `fiber` field - pub const FIBER: usize = PARENT_CHAIN + 2 * core::mem::size_of::(); + /// Offset of `stack` field + pub const STACK: usize = PARENT_CHAIN + 2 * core::mem::size_of::(); /// Offset of `args` field - pub const ARGS: usize = FIBER + super::CONTINUATION_FIBER_SIZE; + pub const ARGS: usize = STACK + super::FIBER_STACK_SIZE; /// Offset of `tag_return_values` field pub const TAG_RETURN_VALUES: usize = ARGS + core::mem::size_of::(); /// Offset of `state` field @@ -202,9 +202,9 @@ pub mod offsets { pub const LAST_WASM_ENTRY_SP: usize = offset_of!(StackLimits, last_wasm_entry_sp); } - /// Size of wasmtime_runtime::continuation::ContinuationFiber. + /// Size of wasmtime_runtime::continuation::FiberStack. /// We test there that this value is correct. - pub const CONTINUATION_FIBER_SIZE: usize = 4 * core::mem::size_of::(); + pub const FIBER_STACK_SIZE: usize = 3 * core::mem::size_of::(); /// Size of type `wasmtime_runtime::continuation::StackChain`. /// We test there that this value is correct. diff --git a/crates/cranelift/src/wasmfx/optimized.rs b/crates/cranelift/src/wasmfx/optimized.rs index 6a1967887d6e..3a4e9a5d61f8 100644 --- a/crates/cranelift/src/wasmfx/optimized.rs +++ b/crates/cranelift/src/wasmfx/optimized.rs @@ -1452,6 +1452,15 @@ pub(crate) fn translate_resume<'a>( let next_revision = vmcontref.incr_revision(env, builder, revision); emit_debug_println!(env, builder, "[resume] new revision = {}", next_revision); + if cfg!(debug_assertions) { + // This should be impossible due to the linearity check. + // We keep this check mostly for the test that runs a continuation + // twice with `unsafe_disable_continuation_linearity_check` enabled. + let zero = builder.ins().iconst(I8, 0); + let has_returned = vmcontref.has_returned(builder); + emit_debug_assert_eq!(env, builder, has_returned, zero); + } + if resume_args.len() > 0 { // We store the arguments in the `VMContRef` to be resumed. let count = builder.ins().iconst(I32, resume_args.len() as i64); diff --git a/crates/wasmtime/src/runtime/vm/continuation.rs b/crates/wasmtime/src/runtime/vm/continuation.rs index 1bc0bb34d7b8..342d6488968f 100644 --- a/crates/wasmtime/src/runtime/vm/continuation.rs +++ b/crates/wasmtime/src/runtime/vm/continuation.rs @@ -74,7 +74,6 @@ unsafe impl Sync for VMContObj {} pub mod optimized { use super::stack_chain::StackChain; use crate::runtime::vm::{ - fibre::Fiber, vmcontext::{VMFuncRef, ValRaw}, Instance, TrapReason, }; @@ -85,7 +84,6 @@ pub mod optimized { use wasmtime_environ::prelude::*; /// Fibers used for continuations - pub type ContinuationFiber = Fiber; pub type FiberStack = crate::runtime::vm::fibre::FiberStack; /// TODO @@ -98,8 +96,8 @@ pub mod optimized { /// main stack, or absent (in case of a suspended continuation). pub parent_chain: StackChain, - /// The underlying `Fiber`. - pub fiber: ContinuationFiber, + /// The underlying stack. + pub stack: FiberStack, /// Used to store /// 1. 
The arguments to the function passed to cont.new @@ -119,6 +117,12 @@ pub mod optimized { pub revision: u64, } + impl VMContRef { + pub fn fiber_stack(&self) -> &FiberStack { + &self.stack + } + } + /// TODO pub fn cont_ref_forward_tag_return_values_buffer( parent: *mut VMContRef, @@ -153,7 +157,11 @@ pub mod optimized { // parent fields here. let contref: Box = unsafe { Box::from_raw(contref) }; - instance.wasmfx_deallocate_stack(contref.fiber.stack()); + + // A continuation must have run to completion before deallocating it. + assert!(contref.state == State::Returned); + + instance.wasmfx_deallocate_stack(&contref.stack); if contref.args.data.is_null() { debug_assert!(contref.args.length as usize == 0); debug_assert!(contref.args.capacity as usize == 0); @@ -200,30 +208,27 @@ pub mod optimized { let stack_size = wasmfx_config.stack_size; let red_zone_size = wasmfx_config.red_zone_size; - let fiber = { + let stack = { let stack = instance.wasmfx_allocate_stack().map_err(|_error| { TrapReason::user_without_backtrace(anyhow::anyhow!( "Fiber stack allocation failed!" )) })?; - Fiber::new( - stack, + stack.initialize( func.cast::(), caller_vmctx, payload.data as *mut ValRaw, payload.capacity as usize, - ) - .map_err(|_error| { - TrapReason::user_without_backtrace(anyhow::anyhow!("Fiber construction failed!")) - })? + ); + stack }; - let tsp = fiber.stack().top().unwrap(); + let tsp = stack.top().unwrap(); let stack_limit = unsafe { tsp.sub(stack_size - red_zone_size) } as usize; let contref = Box::new(VMContRef { revision: 0, limits: StackLimits::with_stack_limit(stack_limit), - fiber, + stack, parent_chain: StackChain::Absent, args: payload, tag_return_values: Payloads::new(0), @@ -295,7 +300,7 @@ pub mod optimized { *runtime_limits.last_wasm_entry_sp.get() = (*contref).limits.last_wasm_entry_sp; } - Ok(cont.fiber.resume()) + Ok(cont.stack.resume()) } /// TODO @@ -321,22 +326,15 @@ pub mod optimized { } }?; - let fiber = &running.fiber; - - let stack_ptr = fiber.stack().top().ok_or_else(|| { - TrapReason::user_without_backtrace(anyhow::anyhow!( - "Failed to retrieve stack top pointer!" - )) - })?; + let stack = &running.stack; debug_println!( "Suspending while running {:p}, parent is {:?}", running, running.parent_chain ); - let suspend = crate::runtime::vm::fibre::unix::Suspend::from_top_ptr(stack_ptr); let payload = ControlEffect::suspend(tag_addr as *const u8); - Ok(suspend.switch(payload)) + Ok(stack.suspend(payload)) } // Tests @@ -350,7 +348,7 @@ pub mod optimized { offset_of!(VMContRef, parent_chain), vm_cont_ref::PARENT_CHAIN ); - assert_eq!(offset_of!(VMContRef, fiber), vm_cont_ref::FIBER); + assert_eq!(offset_of!(VMContRef, stack), vm_cont_ref::STACK); assert_eq!(offset_of!(VMContRef, args), vm_cont_ref::ARGS); assert_eq!( offset_of!(VMContRef, tag_return_values), @@ -358,10 +356,7 @@ pub mod optimized { ); assert_eq!(offset_of!(VMContRef, state), vm_cont_ref::STATE); - assert_eq!( - core::mem::size_of::(), - CONTINUATION_FIBER_SIZE - ); + assert_eq!(core::mem::size_of::(), FIBER_STACK_SIZE); assert_eq!(core::mem::size_of::(), STACK_CHAIN_SIZE); assert_eq!(offset_of!(VMContRef, revision), vm_cont_ref::REVISION); @@ -400,6 +395,12 @@ pub mod baseline { pub _marker: core::marker::PhantomPinned, } + impl VMContRef { + pub fn fiber_stack(&self) -> &FiberStack { + self.fiber.stack() + } + } + // We use thread local state to simulate the VMContext. The use of // thread local state is necessary to reliably pass the testsuite, // as the test driver is multi-threaded. 
diff --git a/crates/wasmtime/src/runtime/vm/fibre/mod.rs b/crates/wasmtime/src/runtime/vm/fibre/mod.rs index 7486cb06b843..9ddf8c0005c0 100644 --- a/crates/wasmtime/src/runtime/vm/fibre/mod.rs +++ b/crates/wasmtime/src/runtime/vm/fibre/mod.rs @@ -6,7 +6,6 @@ cfg_if::cfg_if! { if #[cfg(not(feature = "wasmfx_baseline"))] { - use std::cell::Cell; use std::io; use std::ops::Range; use wasmtime_continuations::ControlEffect; @@ -24,6 +23,7 @@ cfg_if::cfg_if! { /// Represents an execution stack to use for a fiber. #[derive(Debug)] + #[repr(C)] pub struct FiberStack(imp::FiberStack); impl FiberStack { @@ -66,75 +66,27 @@ cfg_if::cfg_if! { pub fn range(&self) -> Option> { self.0.range() } - } - - pub struct Fiber { - stack: FiberStack, - inner: imp::Fiber, - done: Cell, - } - - impl Fiber { - /// Creates a new fiber which will execute `func` on the given stack. - /// - /// This function returns a `Fiber` which, when resumed, will execute `func` - /// to completion. When desired the `func` can suspend itself via - /// `Fiber::suspend`. - pub fn new( - stack: FiberStack, - func_ref: *const VMFuncRef, - caller_vmctx: *mut VMContext, - args_ptr: *mut ValRaw, - args_capacity: usize, - ) -> io::Result { - let inner = imp::Fiber::new(&stack.0, func_ref, caller_vmctx, args_ptr, args_capacity)?; - - Ok(Self { - stack, - inner, - done: Cell::new(false), - }) - } /// Resumes execution of this fiber. - /// - /// This function will transfer execution to the fiber and resume from where - /// it last left off. - /// - /// Returns `true` if the fiber finished or `false` if the fiber was - /// suspended in the middle of execution. - /// - /// # Panics - /// - /// Panics if the current thread is already executing a fiber or if this - /// fiber has already finished. - /// - /// Note that if the fiber itself panics during execution then the panic - /// will be propagated to this caller. pub fn resume(&self) -> ControlEffect { - assert!(!self.done.replace(true), "cannot resume a finished fiber"); - let reason = self.inner.resume(&self.stack.0); - if ControlEffect::is_suspend(reason) { - self.done.set(false) - } - reason + self.0.resume() } - /// Returns whether this fiber has finished executing. - pub fn done(&self) -> bool { - self.done.get() + pub fn suspend(&self, payload: ControlEffect) { + self.0.suspend(payload) } - /// Gets the stack associated with this fiber. 
- pub fn stack(&self) -> &FiberStack { - &self.stack + pub fn initialize( + &self, + func_ref: *const VMFuncRef, + caller_vmctx: *mut VMContext, + args_ptr: *mut ValRaw, + args_capacity: usize, + ) { + self.0.initialize(func_ref, caller_vmctx, args_ptr, args_capacity) } } - impl Drop for Fiber { - fn drop(&mut self) { - debug_assert!(self.done.get(), "fiber dropped without finishing"); - } - } + } } diff --git a/crates/wasmtime/src/runtime/vm/fibre/unix.rs b/crates/wasmtime/src/runtime/vm/fibre/unix.rs index b9b9bc1707e3..0bf18311f802 100644 --- a/crates/wasmtime/src/runtime/vm/fibre/unix.rs +++ b/crates/wasmtime/src/runtime/vm/fibre/unix.rs @@ -116,6 +116,7 @@ pub enum Allocator { } #[derive(Debug)] +#[repr(C)] pub struct FiberStack { // The top of the stack; for stacks allocated by the fiber implementation itself, // the base address of the allocation will be `top.sub(len.unwrap())` @@ -190,6 +191,43 @@ impl FiberStack { let base = unsafe { self.top.sub(self.len) as usize }; Some(base..base + self.len) } + + pub fn initialize( + &self, + func_ref: *const VMFuncRef, + caller_vmctx: *mut VMContext, + args_ptr: *mut ValRaw, + args_capacity: usize, + ) { + unsafe { + wasmtime_fibre_init( + self.top, + func_ref, + caller_vmctx, + args_ptr, + args_capacity, + wasmtime_fibre_switch as *const u8, + ); + } + } + + pub(crate) fn resume(&self) -> ControlEffect { + unsafe { + let reason = ControlEffect::resume().into(); + ControlEffect::from(wasmtime_fibre_switch(self.top, reason)) + } + } + + pub fn suspend(&self, payload: ControlEffect) { + suspend_fiber(self.top, payload) + } +} + +pub fn suspend_fiber(top_of_stack: *mut u8, payload: ControlEffect) { + unsafe { + let arg = payload.into(); + wasmtime_fibre_switch(top_of_stack, arg); + } } impl Drop for FiberStack { @@ -210,10 +248,6 @@ impl Drop for FiberStack { } } -pub struct Fiber; - -pub struct Suspend(*mut u8); - extern "C" { // We allow "improper ctypes" here (i.e., passing values as parameters in an // extern C function that Rust deems non FFI-safe): The two problematic @@ -262,55 +296,8 @@ extern "C" fn fiber_start( array_call_trampoline(callee_vmxtx, caller_vmxtx, args_ptr, args_capacity); // Switch back to parent, indicating that the continuation returned. - let inner = Suspend(top_of_stack); let reason = ControlEffect::return_(); - inner.switch(reason); - } -} - -impl Fiber { - pub fn new( - stack: &FiberStack, - func_ref: *const VMFuncRef, - caller_vmctx: *mut VMContext, - args_ptr: *mut ValRaw, - args_capacity: usize, - ) -> io::Result { - unsafe { - wasmtime_fibre_init( - stack.top, - func_ref, - caller_vmctx, - args_ptr, - args_capacity, - wasmtime_fibre_switch as *const u8, - ); - } - - Ok(Self) - } - - pub(crate) fn resume(&self, stack: &FiberStack) -> ControlEffect { - unsafe { - let reason = ControlEffect::resume().into(); - ControlEffect::from(wasmtime_fibre_switch(stack.top, reason)) - } - } -} - -impl Suspend { - pub fn switch(&self, payload: ControlEffect) { - unsafe { - let arg = payload.into(); - wasmtime_fibre_switch(self.0, arg); - } - } - - // NOTE(dhil): This function is never applied when using the - // baseline implementation. 
- #[allow(dead_code)] - pub fn from_top_ptr(ptr: *mut u8) -> Self { - Suspend(ptr) + suspend_fiber(top_of_stack, reason) } } diff --git a/crates/wasmtime/src/runtime/vm/traphandlers/backtrace.rs b/crates/wasmtime/src/runtime/vm/traphandlers/backtrace.rs index a8806f2536fc..8f0b195bde83 100644 --- a/crates/wasmtime/src/runtime/vm/traphandlers/backtrace.rs +++ b/crates/wasmtime/src/runtime/vm/traphandlers/backtrace.rs @@ -229,7 +229,7 @@ impl Backtrace { match continuation_opt { Some(continuation) => unsafe { let cont = &*continuation; - let stack_range = cont.fiber.stack().range().unwrap(); + let stack_range = cont.fiber_stack().range().unwrap(); debug_assert!(stack_range.contains(&limits.last_wasm_exit_fp)); debug_assert!(stack_range.contains(&limits.last_wasm_entry_sp)); debug_assert!(stack_range.contains(&limits.stack_limit));
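To summarize the refactoring described in the commit message, here is a minimal, self-contained Rust sketch of the resulting shape. This is not the actual wasmtime code: every type, field, and method body below is a simplified stand-in (the real definitions live under `crates/wasmtime/src/runtime/vm/`), and it only illustrates that `VMContRef` now owns a `FiberStack` directly and that the operations formerly split across `Fiber` and `Suspend` (`initialize`, `resume`, `suspend`) now live on `FiberStack` itself.

```rust
// Minimal, illustrative stand-ins for the wasmtime-internal types touched by
// this patch. Everything here is simplified; none of it is the real API.

/// Simplified stand-in for `wasmtime_continuations::ControlEffect`.
#[derive(Debug, Clone, Copy)]
#[allow(dead_code)]
enum ControlEffect {
    Return,
    Suspend,
}

/// Stand-in for `fibre::FiberStack`: after this patch it carries the
/// operations that previously lived on the removed `Fiber` and `Suspend`
/// types.
#[derive(Debug)]
struct FiberStack {
    top: usize, // placeholder for the top-of-stack pointer
}

impl FiberStack {
    /// Replaces `Fiber::new`: the stack is set up in place instead of being
    /// wrapped in a separate `Fiber` value.
    fn initialize(&self, _entry: fn()) {
        // real code switches through wasmtime_fibre_init
    }

    /// Replaces `Fiber::resume`: switches execution onto this stack.
    fn resume(&self) -> ControlEffect {
        // real code switches through wasmtime_fibre_switch
        ControlEffect::Return
    }

    /// Replaces `Suspend::switch`: switches back to the parent stack.
    #[allow(dead_code)]
    fn suspend(&self, _payload: ControlEffect) {
        // real code switches through wasmtime_fibre_switch
    }
}

/// Stand-in for the optimized `VMContRef`: it now owns the `FiberStack`
/// directly instead of storing a `Fiber` wrapper around it.
struct VMContRef {
    stack: FiberStack,
}

impl VMContRef {
    fn fiber_stack(&self) -> &FiberStack {
        &self.stack
    }
}

fn main() {
    let contref = VMContRef {
        stack: FiberStack { top: 0 },
    };
    contref.stack.initialize(|| {});
    let effect = contref.fiber_stack().resume();
    println!("resume returned: {effect:?}");
}
```

The `fiber_stack()` accessor mirrors the one added by this patch to both the optimized and baseline `VMContRef` definitions, which is what lets callers such as the backtrace code use `cont.fiber_stack().range()` without caring which representation owns the stack.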