wiggle: Refactor with fewer raw pointers #5268

Merged
10 changes: 0 additions & 10 deletions crates/wiggle/generate/src/types/flags.rs
@@ -87,15 +87,5 @@ pub(super) fn define_flags(
#repr::write(&location.cast(), val)
}
}
unsafe impl<'a> #rt::GuestTypeTransparent<'a> for #ident {
#[inline]
fn validate(location: *mut #ident) -> Result<(), #rt::GuestError> {
use std::convert::TryFrom;
// Validate value in memory using #ident::try_from(reprval)
let reprval = unsafe { (location as *mut #repr).read() };
let _val = #ident::try_from(reprval)?;
Ok(())
}
}
}
}
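For a concrete flags type, the deleted template expanded to a read of the underlying representation followed by a `try_from` check. Below is a standalone sketch of that idea with a hypothetical `Fdflags` type and an illustrative flag set; wiggle generated the real version per flags type, so all names here are assumptions:

```rust
// Hypothetical flags type standing in for a generated wiggle flags type;
// only the low five bits are defined here (illustrative).
#[repr(transparent)]
struct Fdflags(u16);

#[derive(Debug)]
struct InvalidFlags(u16);

impl Fdflags {
    const ALL: u16 = 0b1_1111;
}

impl TryFrom<u16> for Fdflags {
    type Error = InvalidFlags;
    fn try_from(repr: u16) -> Result<Self, InvalidFlags> {
        // Reject any bit pattern with bits outside the defined flag set.
        if (repr & !Self::ALL) == 0 {
            Ok(Fdflags(repr))
        } else {
            Err(InvalidFlags(repr))
        }
    }
}

// What the deleted `validate` did: read the raw representation out of
// memory and reject undefined bits.
fn validate(location: *mut Fdflags) -> Result<(), InvalidFlags> {
    // SAFETY: assumed, as in the deleted code, that the caller already
    // validated bounds and alignment of `location`.
    let reprval = unsafe { (location as *mut u16).read() };
    let _val = Fdflags::try_from(reprval)?;
    Ok(())
}

fn main() {
    let mut f = Fdflags(0b0_0101);
    assert!(validate(&mut f as *mut Fdflags).is_ok());
}
```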
8 changes: 0 additions & 8 deletions crates/wiggle/generate/src/types/handle.rs
@@ -70,14 +70,6 @@ pub(super) fn define_handle(
u32::write(&location.cast(), val.0)
}
}

unsafe impl<'a> #rt::GuestTypeTransparent<'a> for #ident {
#[inline]
fn validate(_location: *mut #ident) -> Result<(), #rt::GuestError> {
// All bit patterns accepted
Ok(())
}
}
}
}

28 changes: 0 additions & 28 deletions crates/wiggle/generate/src/types/record.rs
@@ -85,32 +85,6 @@ pub(super) fn define_struct(
(quote!(), quote!(, PartialEq))
};

let transparent = if s.is_transparent() {
let member_validate = s.member_layout().into_iter().map(|ml| {
let offset = ml.offset;
let typename = names.type_ref(&ml.member.tref, anon_lifetime());
quote! {
// SAFETY: caller has validated bounds and alignment of `location`.
// member_layout gives correctly-aligned pointers inside that area.
#typename::validate(
unsafe { (location as *mut u8).add(#offset) as *mut _ }
)?;
}
});

quote! {
unsafe impl<'a> #rt::GuestTypeTransparent<'a> for #ident {
#[inline]
fn validate(location: *mut #ident) -> Result<(), #rt::GuestError> {
#(#member_validate)*
Ok(())
}
}
}
} else {
quote!()
};

quote! {
#[derive(Clone, Debug #extra_derive)]
pub struct #ident #struct_lifetime {
@@ -136,8 +110,6 @@ pub(super) fn define_struct(
Ok(())
}
}

#transparent
}
}
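The deleted record path validated each member at its layout offset within the already-checked region. A standalone sketch of that walk, using a hypothetical two-field struct with illustrative offsets (wiggle computed the real offsets from the member layout):

```rust
// Hypothetical guest record standing in for a generated wiggle struct.
#[repr(C)]
struct Pair {
    a: u32, // guest offset 0 (illustrative)
    b: u16, // guest offset 4 (illustrative)
}

// Per-member validators; for these primitives every bit pattern is valid.
fn validate_u32(_p: *mut u32) -> Result<(), ()> {
    Ok(())
}
fn validate_u16(_p: *mut u16) -> Result<(), ()> {
    Ok(())
}

// What the deleted `transparent` arm generated: validate each member at
// its layout offset.
fn validate_pair(location: *mut Pair) -> Result<(), ()> {
    // SAFETY: assumed, as in the deleted code, that the caller validated
    // bounds and alignment of `location`; the member offsets keep the
    // derived pointers in-bounds and aligned.
    validate_u32(unsafe { (location as *mut u8).add(0) as *mut u32 })?;
    validate_u16(unsafe { (location as *mut u8).add(4) as *mut u16 })?;
    Ok(())
}

fn main() {
    let mut p = Pair { a: 1, b: 2 };
    assert!(validate_pair(&mut p).is_ok());
}
```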

2 changes: 1 addition & 1 deletion crates/wiggle/generate/src/wasmtime.rs
@@ -118,7 +118,7 @@ fn generate_func(
};
let (mem , ctx) = mem.data_and_store_mut(&mut caller);
let ctx = get_cx(ctx);
let mem = #rt::wasmtime::WasmtimeGuestMemory::new(mem, false);
let mem = #rt::wasmtime::WasmtimeGuestMemory::new(mem);
Ok(<#ret_ty>::from(#abi_func(ctx, &mem #(, #arg_names)*) #await_ ?))
};

141 changes: 46 additions & 95 deletions crates/wiggle/src/guest_type.rs
@@ -1,4 +1,4 @@
use crate::{region::Region, GuestError, GuestPtr};
use crate::{GuestError, GuestPtr};
use std::mem;
use std::sync::atomic::{
AtomicI16, AtomicI32, AtomicI64, AtomicI8, AtomicU16, AtomicU32, AtomicU64, AtomicU8, Ordering,
@@ -51,17 +51,12 @@ pub trait GuestType<'a>: Sized {
/// as in Rust. These types can be used with the `GuestPtr::as_slice` method to
/// view as a slice.
///
/// Unsafe trait because a correct GuestTypeTransparent implemengation ensures that the
/// GuestPtr::as_slice methods are safe. This trait should only ever be implemented
/// by wiggle_generate-produced code.
pub unsafe trait GuestTypeTransparent<'a>: GuestType<'a> {
/// Checks that the memory at `ptr` is a valid representation of `Self`.
///
/// Assumes that memory safety checks have already been performed: `ptr`
/// has been checked to be aligned correctly and reside in memory using
/// `GuestMemory::validate_size_align`
fn validate(ptr: *mut Self) -> Result<(), GuestError>;
}
/// Unsafe trait because a correct `GuestTypeTransparent` implementation ensures
/// that the `GuestPtr::as_slice` methods are safe, notably that the
/// representation on the host matches the guest and all bit patterns are
/// valid. This trait should only ever be implemented by
/// wiggle_generate-produced code.
pub unsafe trait GuestTypeTransparent<'a>: GuestType<'a> {}

macro_rules! integer_primitives {
($([$ty:ident, $ty_atomic:ident],)*) => ($(
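The property the slimmed-down marker trait asserts, that the host layout matches the guest layout and every bit pattern is valid, is what makes viewing raw guest memory as a typed slice sound. A standalone illustration with a hypothetical `#[repr(transparent)]` wrapper, not wiggle's generated code:

```rust
// Hypothetical wrapper: it has the same layout as `u32` and every bit
// pattern is a valid value.
#[repr(transparent)]
#[derive(Copy, Clone, Debug, PartialEq)]
struct Handle(u32);

// Because of those two properties, an aligned, in-bounds region of memory
// can be viewed as a slice of `Handle` without validating each element;
// this is the guarantee `GuestTypeTransparent` implementors promise.
fn view_as_handles(raw: &[u32]) -> &[Handle] {
    // SAFETY: `Handle` is `#[repr(transparent)]` over `u32`, so the two
    // slice types have identical layout, and all bit patterns are valid.
    unsafe { std::slice::from_raw_parts(raw.as_ptr().cast::<Handle>(), raw.len()) }
}

fn main() {
    let raw = [1u32, 2, 3];
    assert_eq!(view_as_handles(&raw), &[Handle(1), Handle(2), Handle(3)][..]);
}
```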
Expand All @@ -71,71 +66,55 @@ macro_rules! integer_primitives {

#[inline]
fn read(ptr: &GuestPtr<'a, Self>) -> Result<Self, GuestError> {
// Any bit pattern for any primitive implemented with this
// macro is safe, so our `validate_size_align` method will
// guarantee that if we are given a pointer it's valid for the
// size of our type as well as properly aligned. Consequently we
// should be able to safely ready the pointer just after we
// validated it, returning it along here.
// Use `validate_size_align` to validate offset and alignment
// internally. The `host_ptr` type will be `&UnsafeCell<Self>`
// indicating that the memory is valid, and next safety checks
// are required to access it.
let offset = ptr.offset();
let size = Self::guest_size();
let host_ptr = ptr.mem().validate_size_align(
offset,
Self::guest_align(),
size,
)?;
let region = Region {
start: offset,
len: size,
};
let (host_ptr, region) = super::validate_size_align::<Self>(ptr.mem(), offset, 1)?;
let host_ptr = &host_ptr[0];

// If this memory is mutable borrowed then it cannot be read
// here, so skip this operation.
//
// Note that shared memories don't allow borrows and other
// shared borrows are ok to overlap with this.
if ptr.mem().is_mut_borrowed(region) {
return Err(GuestError::PtrBorrowed(region));
}

// If the accessed memory is shared, we need to load the bytes
// with the correct memory consistency. We could check if the
// memory is shared each time, but we expect little performance
// difference between an additional branch and a relaxed memory
// access and thus always do the relaxed access here.
let atomic_value_ref: &$ty_atomic =
unsafe { &*(host_ptr.cast::<$ty_atomic>()) };
Ok($ty::from_le(atomic_value_ref.load(Ordering::Relaxed)))
unsafe { &*(host_ptr.get().cast::<$ty_atomic>()) };
let val = atomic_value_ref.load(Ordering::Relaxed);

// And as a final operation convert from the little-endian wasm
// value to a native-endian value for the host.
Ok($ty::from_le(val))
}

#[inline]
fn write(ptr: &GuestPtr<'_, Self>, val: Self) -> Result<(), GuestError> {
// See `read` above for various checks here.
let val = val.to_le();
let offset = ptr.offset();
let size = Self::guest_size();
let host_ptr = ptr.mem().validate_size_align(
offset,
Self::guest_align(),
size,
)?;
let region = Region {
start: offset,
len: size,
};
let (host_ptr, region) = super::validate_size_align::<Self>(ptr.mem(), offset, 1)?;
let host_ptr = &host_ptr[0];
if ptr.mem().is_shared_borrowed(region) || ptr.mem().is_mut_borrowed(region) {
return Err(GuestError::PtrBorrowed(region));
}
// If the accessed memory is shared, we need to load the bytes
// with the correct memory consistency. We could check if the
// memory is shared each time, but we expect little performance
// difference between an additional branch and a relaxed memory
// access and thus always do the relaxed access here.
let atomic_value_ref: &$ty_atomic =
unsafe { &*(host_ptr.cast::<$ty_atomic>()) };
atomic_value_ref.store(val.to_le(), Ordering::Relaxed);
unsafe { &*(host_ptr.get().cast::<$ty_atomic>()) };
atomic_value_ref.store(val, Ordering::Relaxed);
Ok(())
}
}

unsafe impl<'a> GuestTypeTransparent<'a> for $ty {
#[inline]
fn validate(_ptr: *mut $ty) -> Result<(), GuestError> {
// All bit patterns are safe, nothing to do here
Ok(())
}
}
unsafe impl<'a> GuestTypeTransparent<'a> for $ty {}

)*)
}
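The new bodies delegate bounds and alignment checks to a shared `validate_size_align` helper and receive host memory as `&UnsafeCell<Self>` rather than a raw pointer, while the load itself still goes through an atomic type so concurrent writes to shared memories stay well-defined. A minimal standalone sketch of that access pattern, not wiggle's actual API:

```rust
use std::cell::UnsafeCell;
use std::sync::atomic::{AtomicU32, Ordering};

// Validated host memory arrives as `&UnsafeCell<u32>` instead of a raw
// pointer; the relaxed atomic load keeps concurrent guest writes to shared
// memory well-defined.
fn read_u32(cell: &UnsafeCell<u32>) -> u32 {
    // SAFETY: `&UnsafeCell<u32>` witnesses valid, aligned memory, and
    // `AtomicU32` has the same in-memory representation as `u32`.
    let atomic: &AtomicU32 = unsafe { &*cell.get().cast::<AtomicU32>() };
    // Convert from the little-endian wasm value to a native-endian value.
    u32::from_le(atomic.load(Ordering::Relaxed))
}

fn main() {
    let cell = UnsafeCell::new(258u32.to_le());
    assert_eq!(read_u32(&cell), 258);
}
```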
@@ -148,73 +127,45 @@ macro_rules! float_primitives {

#[inline]
fn read(ptr: &GuestPtr<'a, Self>) -> Result<Self, GuestError> {
// Any bit pattern for any primitive implemented with this
// macro is safe, so our `validate_size_align` method will
// guarantee that if we are given a pointer it's valid for the
// size of our type as well as properly aligned. Consequently we
// should be able to safely ready the pointer just after we
// validated it, returning it along here.
// For more commentary see `read` for integers
let offset = ptr.offset();
let size = Self::guest_size();
let host_ptr = ptr.mem().validate_size_align(
let (host_ptr, region) = super::validate_size_align::<$ty_unsigned>(
ptr.mem(),
offset,
Self::guest_align(),
size,
1,
)?;
let region = Region {
start: offset,
len: size,
};
let host_ptr = &host_ptr[0];
if ptr.mem().is_mut_borrowed(region) {
return Err(GuestError::PtrBorrowed(region));
}
// If the accessed memory is shared, we need to load the bytes
// with the correct memory consistency. We could check if the
// memory is shared each time, but we expect little performance
// difference between an additional branch and a relaxed memory
// access and thus always do the relaxed access here.
let atomic_value_ref: &$ty_atomic =
unsafe { &*(host_ptr.cast::<$ty_atomic>()) };
unsafe { &*(host_ptr.get().cast::<$ty_atomic>()) };
let value = $ty_unsigned::from_le(atomic_value_ref.load(Ordering::Relaxed));
Ok($ty::from_bits(value))
}

#[inline]
fn write(ptr: &GuestPtr<'_, Self>, val: Self) -> Result<(), GuestError> {
// For more commentary see `read`/`write` for integers.
let offset = ptr.offset();
let size = Self::guest_size();
let host_ptr = ptr.mem().validate_size_align(
let (host_ptr, region) = super::validate_size_align::<$ty_unsigned>(
ptr.mem(),
offset,
Self::guest_align(),
size,
1,
)?;
let region = Region {
start: offset,
len: size,
};
let host_ptr = &host_ptr[0];
if ptr.mem().is_shared_borrowed(region) || ptr.mem().is_mut_borrowed(region) {
return Err(GuestError::PtrBorrowed(region));
}
// If the accessed memory is shared, we need to load the bytes
// with the correct memory consistency. We could check if the
// memory is shared each time, but we expect little performance
// difference between an additional branch and a relaxed memory
// access and thus always do the relaxed access here.
let atomic_value_ref: &$ty_atomic =
unsafe { &*(host_ptr.cast::<$ty_atomic>()) };
unsafe { &*(host_ptr.get().cast::<$ty_atomic>()) };
let le_value = $ty_unsigned::to_le(val.to_bits());
atomic_value_ref.store(le_value, Ordering::Relaxed);
Ok(())
}
}

unsafe impl<'a> GuestTypeTransparent<'a> for $ty {
#[inline]
fn validate(_ptr: *mut $ty) -> Result<(), GuestError> {
// All bit patterns are safe, nothing to do here
Ok(())
}
}
unsafe impl<'a> GuestTypeTransparent<'a> for $ty {}

)*)
}
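The float path follows the same pattern with one extra step: load the unsigned bit pattern, convert it from little-endian, then reinterpret the bits as a float. A minimal standalone sketch, again not wiggle's actual API:

```rust
use std::cell::UnsafeCell;
use std::sync::atomic::{AtomicU32, Ordering};

// Floats are read via their unsigned bit pattern and reinterpreted.
fn read_f32(cell: &UnsafeCell<u32>) -> f32 {
    // SAFETY: as in the integer sketch, the caller guarantees the memory is
    // valid and aligned, and `AtomicU32` shares `u32`'s representation.
    let atomic: &AtomicU32 = unsafe { &*cell.get().cast::<AtomicU32>() };
    f32::from_bits(u32::from_le(atomic.load(Ordering::Relaxed)))
}

fn main() {
    let cell = UnsafeCell::new(1.5f32.to_bits().to_le());
    assert_eq!(read_f32(&cell), 1.5);
}
```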