Remove fibre::{Fiber, Suspend} types (bytecodealliance#220)
This PR performs a refactoring required by a follow-up PR that will
implement actual pooling of `VMContRef`s.

This PR removes the types `Fiber` and `Suspend` from the `fibre` crate
(i.e., our version of `wasmtime-fiber`). These two types are effectively
leftovers from `wasmtime-fiber`, but at this point they only add an
unnecessary layer of indirection.

Note that the `fibre::FiberStack` type remains. Some functions
originally implemented on `Fiber` are now implemented on `FiberStack`.
Further, the `VMContRef` definition used in the optimized implementation
now owns a `fibre::FiberStack` directly, rather than storing a `Fiber`
as a wrapper around the `FiberStack`.

This PR does not affect the baseline implementation, since that
implementation does not use the `fibre` crate at all.
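
For orientation, the shape of the change can be sketched as follows (a minimal sketch only: field and method names follow the diff below, but all other details of the real wasmtime types are omitted):

```rust
// Minimal sketch of the ownership change only. Field and method names follow
// the diff below; everything else (signatures, bodies, extra fields, unsafe
// plumbing) is omitted, so this is not the actual wasmtime code.

#[allow(dead_code)]
mod before {
    pub struct FiberStack;
    /// `Fiber` merely wrapped the stack: one extra layer of indirection.
    pub struct Fiber {
        stack: FiberStack,
    }
    pub struct VMContRef {
        pub fiber: Fiber,
    }
}

#[allow(dead_code)]
mod after {
    pub struct FiberStack;
    impl FiberStack {
        /// Operations formerly on `Fiber` now live on `FiberStack`.
        pub fn resume(&self) { /* switch onto this stack */ }
        pub fn suspend(&self) { /* switch back to the parent stack */ }
    }
    pub struct VMContRef {
        /// The continuation now owns its stack directly.
        pub stack: FiberStack,
    }
    impl VMContRef {
        pub fn fiber_stack(&self) -> &FiberStack {
            &self.stack
        }
    }
}

fn main() {
    let cont = after::VMContRef { stack: after::FiberStack };
    cont.fiber_stack().resume();
}
```

The real definitions appear in `crates/wasmtime/src/runtime/vm/continuation.rs` and `crates/wasmtime/src/runtime/vm/fibre/mod.rs` below.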
frank-emrich committed Sep 3, 2024
1 parent 9a7d6f9 commit 6ffb849
Showing 7 changed files with 108 additions and 149 deletions.
12 changes: 11 additions & 1 deletion .github/workflows/main.yml
@@ -706,8 +706,18 @@ jobs:
# Crude check for whether
# `unsafe_disable_continuation_linearity_check` makes the test
# `cont_twice` fail.
# We expect the test case to trigger an internal assertion failure (rather
# than a proper Wasm trap), which turns into a SIGILL (4).
# However, we are happy as long as the test fails in some way, meaning that
# it did not produce the correct Wasm trap (which it should only do if
# unsafe_disable_continuation_linearity_check is disabled)
- run: |
(cargo run --features=unsafe_disable_continuation_linearity_check -- wast -W=exceptions,function-references,typed-continuations tests/misc_testsuite/typed-continuations/cont_twice.wast && test $? -eq 101) || test $? -eq 101
# By default, Github actions run commands with pipefail enabled. We don't want that, as the command below is supposed to fail:
set +e
# Let's keep the build step separate from the run step below, because the former should not fail:
cargo build --features=unsafe_disable_continuation_linearity_check
# We want this to fail and are happy with any non-zero exit code:
cargo run --features=unsafe_disable_continuation_linearity_check -- wast -W=exceptions,function-references,typed-continuations tests/misc_testsuite/typed-continuations/cont_twice.wast ; test $? -ne 0
# NB: the test job here is explicitly lacking in cancellation of this run if
# something goes wrong. These take the longest anyway and otherwise if
10 changes: 5 additions & 5 deletions crates/continuations/src/lib.rs
@@ -180,10 +180,10 @@ pub mod offsets {
pub const LIMITS: usize = 0;
/// Offset of `parent_chain` field
pub const PARENT_CHAIN: usize = LIMITS + 4 * core::mem::size_of::<usize>();
/// Offset of `fiber` field
pub const FIBER: usize = PARENT_CHAIN + 2 * core::mem::size_of::<usize>();
/// Offset of `stack` field
pub const STACK: usize = PARENT_CHAIN + 2 * core::mem::size_of::<usize>();
/// Offset of `args` field
pub const ARGS: usize = FIBER + super::CONTINUATION_FIBER_SIZE;
pub const ARGS: usize = STACK + super::FIBER_STACK_SIZE;
/// Offset of `tag_return_values` field
pub const TAG_RETURN_VALUES: usize = ARGS + core::mem::size_of::<Payloads>();
/// Offset of `state` field
@@ -202,9 +202,9 @@ pub mod offsets {
pub const LAST_WASM_ENTRY_SP: usize = offset_of!(StackLimits, last_wasm_entry_sp);
}

/// Size of wasmtime_runtime::continuation::ContinuationFiber.
/// Size of wasmtime_runtime::continuation::FiberStack.
/// We test there that this value is correct.
pub const CONTINUATION_FIBER_SIZE: usize = 4 * core::mem::size_of::<usize>();
pub const FIBER_STACK_SIZE: usize = 3 * core::mem::size_of::<usize>();

/// Size of type `wasmtime_runtime::continuation::StackChain`.
/// We test there that this value is correct.
9 changes: 9 additions & 0 deletions crates/cranelift/src/wasmfx/optimized.rs
@@ -1452,6 +1452,15 @@ pub(crate) fn translate_resume<'a>(
let next_revision = vmcontref.incr_revision(env, builder, revision);
emit_debug_println!(env, builder, "[resume] new revision = {}", next_revision);

if cfg!(debug_assertions) {
// This should be impossible due to the linearity check.
// We keep this check mostly for the test that runs a continuation
// twice with `unsafe_disable_continuation_linearity_check` enabled.
let zero = builder.ins().iconst(I8, 0);
let has_returned = vmcontref.has_returned(builder);
emit_debug_assert_eq!(env, builder, has_returned, zero);
}

if resume_args.len() > 0 {
// We store the arguments in the `VMContRef` to be resumed.
let count = builder.ins().iconst(I32, resume_args.len() as i64);
59 changes: 30 additions & 29 deletions crates/wasmtime/src/runtime/vm/continuation.rs
@@ -74,7 +74,6 @@ unsafe impl Sync for VMContObj {}
pub mod optimized {
use super::stack_chain::StackChain;
use crate::runtime::vm::{
fibre::Fiber,
vmcontext::{VMFuncRef, ValRaw},
Instance, TrapReason,
};
@@ -85,7 +84,6 @@ pub mod optimized {
use wasmtime_environ::prelude::*;

/// Fibers used for continuations
pub type ContinuationFiber = Fiber;
pub type FiberStack = crate::runtime::vm::fibre::FiberStack;

/// TODO
@@ -98,8 +96,8 @@ pub mod optimized {
/// main stack, or absent (in case of a suspended continuation).
pub parent_chain: StackChain,

/// The underlying `Fiber`.
pub fiber: ContinuationFiber,
/// The underlying stack.
pub stack: FiberStack,

/// Used to store
/// 1. The arguments to the function passed to cont.new
@@ -119,6 +117,12 @@ pub mod optimized {
pub revision: u64,
}

impl VMContRef {
pub fn fiber_stack(&self) -> &FiberStack {
&self.stack
}
}

/// TODO
pub fn cont_ref_forward_tag_return_values_buffer(
parent: *mut VMContRef,
@@ -153,7 +157,11 @@ pub mod optimized {
// parent fields here.

let contref: Box<VMContRef> = unsafe { Box::from_raw(contref) };
instance.wasmfx_deallocate_stack(contref.fiber.stack());

// A continuation must have run to completion before deallocating it.
assert!(contref.state == State::Returned);

instance.wasmfx_deallocate_stack(&contref.stack);
if contref.args.data.is_null() {
debug_assert!(contref.args.length as usize == 0);
debug_assert!(contref.args.capacity as usize == 0);
@@ -200,30 +208,27 @@ pub mod optimized {
let stack_size = wasmfx_config.stack_size;
let red_zone_size = wasmfx_config.red_zone_size;

let fiber = {
let stack = {
let stack = instance.wasmfx_allocate_stack().map_err(|_error| {
TrapReason::user_without_backtrace(anyhow::anyhow!(
"Fiber stack allocation failed!"
))
})?;
Fiber::new(
stack,
stack.initialize(
func.cast::<VMFuncRef>(),
caller_vmctx,
payload.data as *mut ValRaw,
payload.capacity as usize,
)
.map_err(|_error| {
TrapReason::user_without_backtrace(anyhow::anyhow!("Fiber construction failed!"))
})?
);
stack
};

let tsp = fiber.stack().top().unwrap();
let tsp = stack.top().unwrap();
let stack_limit = unsafe { tsp.sub(stack_size - red_zone_size) } as usize;
let contref = Box::new(VMContRef {
revision: 0,
limits: StackLimits::with_stack_limit(stack_limit),
fiber,
stack,
parent_chain: StackChain::Absent,
args: payload,
tag_return_values: Payloads::new(0),
@@ -295,7 +300,7 @@ pub mod optimized {
*runtime_limits.last_wasm_entry_sp.get() = (*contref).limits.last_wasm_entry_sp;
}

Ok(cont.fiber.resume())
Ok(cont.stack.resume())
}

/// TODO
@@ -321,22 +326,15 @@ pub mod optimized {
}
}?;

let fiber = &running.fiber;

let stack_ptr = fiber.stack().top().ok_or_else(|| {
TrapReason::user_without_backtrace(anyhow::anyhow!(
"Failed to retrieve stack top pointer!"
))
})?;
let stack = &running.stack;
debug_println!(
"Suspending while running {:p}, parent is {:?}",
running,
running.parent_chain
);

let suspend = crate::runtime::vm::fibre::unix::Suspend::from_top_ptr(stack_ptr);
let payload = ControlEffect::suspend(tag_addr as *const u8);
Ok(suspend.switch(payload))
Ok(stack.suspend(payload))
}

// Tests
@@ -350,18 +348,15 @@ pub mod optimized {
offset_of!(VMContRef, parent_chain),
vm_cont_ref::PARENT_CHAIN
);
assert_eq!(offset_of!(VMContRef, fiber), vm_cont_ref::FIBER);
assert_eq!(offset_of!(VMContRef, stack), vm_cont_ref::STACK);
assert_eq!(offset_of!(VMContRef, args), vm_cont_ref::ARGS);
assert_eq!(
offset_of!(VMContRef, tag_return_values),
vm_cont_ref::TAG_RETURN_VALUES
);
assert_eq!(offset_of!(VMContRef, state), vm_cont_ref::STATE);

assert_eq!(
core::mem::size_of::<ContinuationFiber>(),
CONTINUATION_FIBER_SIZE
);
assert_eq!(core::mem::size_of::<FiberStack>(), FIBER_STACK_SIZE);
assert_eq!(core::mem::size_of::<StackChain>(), STACK_CHAIN_SIZE);

assert_eq!(offset_of!(VMContRef, revision), vm_cont_ref::REVISION);
@@ -400,6 +395,12 @@ pub mod baseline {
pub _marker: core::marker::PhantomPinned,
}

impl VMContRef {
pub fn fiber_stack(&self) -> &FiberStack {
self.fiber.stack()
}
}

// We use thread local state to simulate the VMContext. The use of
// thread local state is necessary to reliably pass the testsuite,
// as the test driver is multi-threaded.
74 changes: 13 additions & 61 deletions crates/wasmtime/src/runtime/vm/fibre/mod.rs
@@ -6,7 +6,6 @@
cfg_if::cfg_if! {
if #[cfg(not(feature = "wasmfx_baseline"))] {

use std::cell::Cell;
use std::io;
use std::ops::Range;
use wasmtime_continuations::ControlEffect;
@@ -24,6 +23,7 @@ cfg_if::cfg_if! {

/// Represents an execution stack to use for a fiber.
#[derive(Debug)]
#[repr(C)]
pub struct FiberStack(imp::FiberStack);

impl FiberStack {
@@ -66,75 +66,27 @@ cfg_if::cfg_if! {
pub fn range(&self) -> Option<Range<usize>> {
self.0.range()
}
}

pub struct Fiber {
stack: FiberStack,
inner: imp::Fiber,
done: Cell<bool>,
}

impl Fiber {
/// Creates a new fiber which will execute `func` on the given stack.
///
/// This function returns a `Fiber` which, when resumed, will execute `func`
/// to completion. When desired the `func` can suspend itself via
/// `Fiber::suspend`.
pub fn new(
stack: FiberStack,
func_ref: *const VMFuncRef,
caller_vmctx: *mut VMContext,
args_ptr: *mut ValRaw,
args_capacity: usize,
) -> io::Result<Self> {
let inner = imp::Fiber::new(&stack.0, func_ref, caller_vmctx, args_ptr, args_capacity)?;

Ok(Self {
stack,
inner,
done: Cell::new(false),
})
}

/// Resumes execution of this fiber.
///
/// This function will transfer execution to the fiber and resume from where
/// it last left off.
///
/// Returns `true` if the fiber finished or `false` if the fiber was
/// suspended in the middle of execution.
///
/// # Panics
///
/// Panics if the current thread is already executing a fiber or if this
/// fiber has already finished.
///
/// Note that if the fiber itself panics during execution then the panic
/// will be propagated to this caller.
pub fn resume(&self) -> ControlEffect {
assert!(!self.done.replace(true), "cannot resume a finished fiber");
let reason = self.inner.resume(&self.stack.0);
if ControlEffect::is_suspend(reason) {
self.done.set(false)
}
reason
self.0.resume()
}

/// Returns whether this fiber has finished executing.
pub fn done(&self) -> bool {
self.done.get()
pub fn suspend(&self, payload: ControlEffect) {
self.0.suspend(payload)
}

/// Gets the stack associated with this fiber.
pub fn stack(&self) -> &FiberStack {
&self.stack
pub fn initialize(
&self,
func_ref: *const VMFuncRef,
caller_vmctx: *mut VMContext,
args_ptr: *mut ValRaw,
args_capacity: usize,
) {
self.0.initialize(func_ref, caller_vmctx, args_ptr, args_capacity)
}
}

impl Drop for Fiber {
fn drop(&mut self) {
debug_assert!(self.done.get(), "fiber dropped without finishing");
}
}

}
}
