introduce all the necessary features for reusing contract data memories

Ekleog-NEAR committed Apr 24, 2024
1 parent a9ddf9c commit c1228e5

Showing 5 changed files with 126 additions and 35 deletions.
15 changes: 14 additions & 1 deletion runtime/near-vm-runner/src/near_vm_runner/memory.rs
@@ -1,16 +1,19 @@
use crate::logic::{MemSlice, MemoryLike};
use near_vm_types::{MemoryType, Pages};
use near_vm_vm::{LinearMemory, MemoryStyle, VMMemory};
use near_vm_vm::{LinearMemory, MemoryStyle, Mmap, VMMemory};
use std::borrow::Cow;
use std::sync::Arc;

#[derive(Clone)]
pub struct NearVmMemory(Arc<LinearMemory>);

pub struct PreallocatedMemory(Mmap);

impl NearVmMemory {
pub fn new(
initial_memory_pages: u32,
max_memory_pages: u32,
from_preallocated: Option<PreallocatedMemory>,
) -> Result<Self, near_vm_vm::MemoryError> {
let max_pages = Pages(max_memory_pages);
Ok(NearVmMemory(Arc::new(LinearMemory::new(
@@ -19,9 +22,19 @@ impl NearVmMemory {
bound: max_pages,
offset_guard_size: near_vm_types::WASM_PAGE_SIZE as u64,
},
from_preallocated.map(|m| m.0),
)?)))
}

#[cfg(unused)] // TODO: this will be used once we reuse the memories
pub fn into_preallocated(self) -> Result<PreallocatedMemory, String> {
Ok(PreallocatedMemory(
Arc::into_inner(self.0)
.ok_or("Multiple references to NearVmMemory prevent its reuse")?
.into_mmap()?,
))
}

/// Returns pointer to memory at the specified offset provided that there’s
/// enough space in the buffer starting at the returned pointer.
///
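Taken together, the pieces above sketch the intended lifecycle: a memory is built from an optional `PreallocatedMemory`, and once the last reference to it is dropped it can be turned back into one. A minimal sketch of that round-trip, written as if inside this `memory` module so the types are in scope (page counts are illustrative, and `into_preallocated` is still gated behind `#[cfg(unused)]` in this commit):

```rust
fn call_contract_twice_reusing_memory() {
    // First call: no recovered allocation is available yet.
    let memory = NearVmMemory::new(1, 1024, None)
        .expect("Cannot create memory for a contract call");
    // ... run a contract against `memory` ...

    // Recover the underlying allocation. This fails if another Arc clone
    // of the memory is still alive somewhere.
    let prealloc: PreallocatedMemory =
        memory.into_preallocated().expect("memory still referenced");

    // Second call: hand the zeroed, inaccessible allocation back instead
    // of mmap'ing a fresh region. The maximal size must match the first
    // allocation (see the assert in linear_memory.rs below).
    let _memory = NearVmMemory::new(1, 1024, Some(prealloc))
        .expect("Cannot create memory for a contract call");
}
```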
3 changes: 2 additions & 1 deletion runtime/near-vm-runner/src/near_vm_runner/runner.rs
@@ -304,6 +304,7 @@ impl NearVM {
let mut memory = NearVmMemory::new(
self.config.limit_config.initial_memory_pages,
self.config.limit_config.max_memory_pages,
None, // TODO: this should actually reuse the memories
)
.expect("Cannot create memory for a contract call");
// FIXME: this mostly duplicates the `run_module` method.
@@ -637,7 +638,7 @@ mod tests {
#[test]
fn test_memory_like() {
crate::logic::test_utils::test_memory_like(|| {
Box::new(super::NearVmMemory::new(1, 1).unwrap())
Box::new(super::NearVmMemory::new(1, 1, None).unwrap())
});
}
}
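The `None` above marks the call site where reuse would eventually plug in. One plausible shape for that reuse, purely hypothetical and not implemented anywhere in this commit (`MEMORY_POOL`, `take_preallocated`, and `return_memory` are invented names, written as if inside the runner module with the memory types in scope):

```rust
use std::cell::RefCell;

thread_local! {
    // Hypothetical per-thread cache of allocations recovered from
    // finished contract calls.
    static MEMORY_POOL: RefCell<Vec<PreallocatedMemory>> = RefCell::new(Vec::new());
}

fn take_preallocated() -> Option<PreallocatedMemory> {
    MEMORY_POOL.with(|pool| pool.borrow_mut().pop())
}

fn return_memory(memory: NearVmMemory) {
    // Reuse is only possible when no other clone of the Arc survived the
    // call; otherwise the allocation is simply dropped.
    if let Ok(prealloc) = memory.into_preallocated() {
        MEMORY_POOL.with(|pool| pool.borrow_mut().push(prealloc));
    }
}

// The call site above would then become:
// let mut memory = NearVmMemory::new(initial_pages, max_pages, take_preallocated())
//     .expect("Cannot create memory for a contract call");
```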
2 changes: 1 addition & 1 deletion runtime/near-vm/test-api/src/sys/tunables.rs
@@ -106,7 +106,7 @@ impl Tunables for BaseTunables {
ty: &MemoryType,
style: &MemoryStyle,
) -> Result<Arc<LinearMemory>, MemoryError> {
Ok(Arc::new(LinearMemory::new(&ty, &style)?))
Ok(Arc::new(LinearMemory::new(&ty, &style, None)?))
}

/// Create a memory owned by the VM given a [`MemoryType`] and a [`MemoryStyle`].
41 changes: 32 additions & 9 deletions runtime/near-vm/vm/src/memory/linear_memory.rs
@@ -70,8 +70,15 @@ impl LinearMemory {
///
/// This creates a `LinearMemory` with owned metadata: this can be used to create a memory
/// that will be imported into Wasm modules.
pub fn new(memory: &MemoryType, style: &MemoryStyle) -> Result<Self, MemoryError> {
unsafe { Self::new_internal(memory, style, None) }
///
/// If `from_mmap` is passed in, then this linear memory will attempt to reuse the underlying
/// allocation from there.
pub fn new(
memory: &MemoryType,
style: &MemoryStyle,
from_mmap: Option<Mmap>,
) -> Result<Self, MemoryError> {
unsafe { Self::new_internal(memory, style, None, from_mmap) }
}

/// Create a new linear memory instance with specified minimum and maximum number of wasm pages.
@@ -86,14 +93,15 @@
style: &MemoryStyle,
vm_memory_location: NonNull<VMMemoryDefinition>,
) -> Result<Self, MemoryError> {
Self::new_internal(memory, style, Some(vm_memory_location))
Self::new_internal(memory, style, Some(vm_memory_location), None)
}

/// Build a `LinearMemory` with either self-owned or VM owned metadata.
unsafe fn new_internal(
memory: &MemoryType,
style: &MemoryStyle,
vm_memory_location: Option<NonNull<VMMemoryDefinition>>,
from_mmap: Option<Mmap>,
) -> Result<Self, MemoryError> {
if memory.minimum > Pages::max_value() {
return Err(MemoryError::MinimumMemoryTooLarge {
@@ -133,11 +141,20 @@
let mapped_pages = memory.minimum;
let mapped_bytes = mapped_pages.bytes();

let mut mmap = WasmMmap {
alloc: Mmap::accessible_reserved(mapped_bytes.0, request_bytes)
.map_err(MemoryError::Region)?,
size: memory.minimum,
let alloc = if let Some(alloc) = from_mmap {
// For now we always request the same size, because our prepare step hardcodes a maximum size
// of 64 MiB. This could change in the future, at which point this assert will start triggering
// and we’ll need to think of a better way to handle things.
assert_eq!(
alloc.len(),
request_bytes,
"Multiple data memory mmap's had different maximal lengths"
);
alloc
} else {
Mmap::accessible_reserved(mapped_bytes.0, request_bytes).map_err(MemoryError::Region)?
};
let mut mmap = WasmMmap { alloc, size: memory.minimum };

let base_ptr = mmap.alloc.as_mut_ptr();
let mem_length = memory.minimum.bytes().0;
@@ -163,6 +180,13 @@
})
}

/// Discard this linear memory, turning it back into a raw allocation ready for reuse
pub fn into_mmap(self) -> Result<Mmap, String> {
let mut res = self.mmap.into_inner().unwrap().alloc;
res.reset()?;
Ok(res)
}

/// Get the `VMMemoryDefinition`.
///
/// # Safety
@@ -239,7 +263,6 @@ impl LinearMemory {
}

let delta_bytes = delta.bytes().0;
let prev_bytes = prev_pages.bytes().0;
let new_bytes = new_pages.bytes().0;

if new_bytes > mmap.alloc.len() - self.offset_guard_size {
@@ -261,7 +284,7 @@
mmap.alloc = new_mmap;
} else if delta_bytes > 0 {
// Make the newly allocated pages accessible.
mmap.alloc.make_accessible(prev_bytes, delta_bytes).map_err(MemoryError::Region)?;
mmap.alloc.make_accessible(new_bytes).map_err(MemoryError::Region)?;
}

mmap.size = new_pages;
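The assert above encodes the invariant that a recycled mapping must have been reserved with the same maximal size as the memory now being built, which holds as long as the same `MemoryType` and `MemoryStyle` are used both times. A sketch of the round-trip at this level (a sketch only, assuming `MemoryError` renders via `Display`):

```rust
use near_vm_types::MemoryType;
use near_vm_vm::{LinearMemory, MemoryStyle};

fn reuse_allocation(ty: &MemoryType, style: &MemoryStyle) -> Result<(), String> {
    // Fresh memory: reserves the full maximal size plus the guard region.
    let memory = LinearMemory::new(ty, style, None).map_err(|e| e.to_string())?;

    // Zero the accessible pages, drop their protection, keep the reservation.
    let mmap = memory.into_mmap()?;

    // Rebuild on top of the recycled reservation; `new_internal` asserts
    // that its length matches what would have been requested anew.
    let _memory = LinearMemory::new(ty, style, Some(mmap)).map_err(|e| e.to_string())?;
    Ok(())
}
```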
100 changes: 77 additions & 23 deletions runtime/near-vm/vm/src/mmap.rs
@@ -25,6 +25,7 @@ pub struct Mmap {
// the coordination all happens at the OS layer.
ptr: usize,
len: usize,
accessible_len: usize,
}

impl Mmap {
@@ -34,7 +35,7 @@
// contains code to create a non-null dangling pointer value when
// constructed empty, so we reuse that here.
let empty = Vec::<u8>::new();
Self { ptr: empty.as_ptr() as usize, len: 0 }
Self { ptr: empty.as_ptr() as usize, len: 0, accessible_len: 0 }
}

/// Create a new `Mmap` pointing to at least `size` bytes of page-aligned accessible memory.
@@ -79,7 +80,7 @@ impl Mmap {
return Err(io::Error::last_os_error().to_string());
}

Self { ptr: ptr as usize, len: mapping_size }
Self { ptr: ptr as usize, len: mapping_size, accessible_len: accessible_size }
} else {
// Reserve the mapping size.
let ptr = unsafe {
@@ -96,11 +97,11 @@
return Err(io::Error::last_os_error().to_string());
}

let mut result = Self { ptr: ptr as usize, len: mapping_size };
let mut result = Self { ptr: ptr as usize, len: mapping_size, accessible_len: 0 };

if accessible_size != 0 {
// Commit the accessible size.
result.make_accessible(0, accessible_size)?;
result.make_accessible(accessible_size)?;
}

result
@@ -152,59 +153,112 @@ impl Mmap {
return Err(io::Error::last_os_error().to_string());
}

let mut result = Self { ptr: ptr as usize, len: mapping_size };
let mut result = Self { ptr: ptr as usize, len: mapping_size, accessible_len: 0 };

if accessible_size != 0 {
// Commit the accessible size.
result.make_accessible(0, accessible_size)?;
result.make_accessible(accessible_size)?;
}

result
})
}

/// Make the memory starting at `start` and extending for `len` bytes accessible.
/// `start` and `len` must be native page-size multiples and describe a range within
/// Make the first `new_len` bytes of the memory accessible.
/// `new_len` must be a native page-size multiple and describe a range within
/// `self`'s reserved memory.
#[cfg(not(target_os = "windows"))]
pub fn make_accessible(&mut self, start: usize, len: usize) -> Result<(), String> {
pub fn make_accessible(&mut self, new_len: usize) -> Result<(), String> {
let page_size = region::page::size();
assert_eq!(start & (page_size - 1), 0);
assert_eq!(len & (page_size - 1), 0);
assert_lt!(len, self.len);
assert_lt!(start, self.len - len);
assert_eq!(new_len & (page_size - 1), 0);
assert_lt!(new_len, self.len);
let Some(additional_len) = new_len.checked_sub(self.accessible_len) else {
return Ok(());
};

// Commit the accessible size.
unsafe { region::protect(self.as_ptr().add(start), len, region::Protection::READ_WRITE) }
.map_err(|e| e.to_string())
unsafe {
region::protect(
self.as_ptr().add(self.accessible_len),
additional_len,
region::Protection::READ_WRITE,
)
}
.map_err(|e| e.to_string())?;
self.accessible_len = new_len;
Ok(())
}

/// Make the memory starting at `start` and extending for `len` bytes accessible.
/// `start` and `len` must be native page-size multiples and describe a range within
/// Make the first `new_len` bytes of the memory accessible.
/// `new_len` must be a native page-size multiple and describe a range within
/// `self`'s reserved memory.
#[cfg(target_os = "windows")]
pub fn make_accessible(&mut self, start: usize, len: usize) -> Result<(), String> {
pub fn make_accessible(&mut self, new_len: usize) -> Result<(), String> {
use winapi::ctypes::c_void;
use winapi::um::memoryapi::VirtualAlloc;
use winapi::um::winnt::{MEM_COMMIT, PAGE_READWRITE};
let page_size = region::page::size();
assert_eq!(start & (page_size - 1), 0);
assert_eq!(len & (page_size - 1), 0);
assert_lt!(len, self.len);
assert_lt!(start, self.len - len);
assert_eq!(new_len & (page_size - 1), 0);
assert_lt!(new_len, self.len);
let Some(additional_len) = new_len.checked_sub(self.accessible_len) else {
return Ok(());
};

// Commit the accessible size.
if unsafe {
VirtualAlloc(self.as_ptr().add(start) as *mut c_void, len, MEM_COMMIT, PAGE_READWRITE)
VirtualAlloc(
self.as_ptr().add(self.accessible_len) as *mut c_void,
additional_len,
MEM_COMMIT,
PAGE_READWRITE,
)
}
.is_null()
{
return Err(io::Error::last_os_error().to_string());
}

self.accessible_len = new_len;
Ok(())
}

/// Resets the mmap, putting all byte values back to 0 and resetting the accessible length
#[cfg(not(target_os = "windows"))]
pub fn reset(&mut self) -> Result<(), String> {
unsafe {
self.as_mut_ptr().write_bytes(0, self.accessible_len);
region::protect(self.as_ptr(), self.accessible_len, region::Protection::NONE)
.map_err(|e| e.to_string())?;
self.accessible_len = 0;
Ok(())
}
}

/// Resets the mmap, putting all byte values back to 0 and resetting the accessible length
#[cfg(target_os = "windows")]
pub fn reset(&mut self) -> Result<(), String> {
use winapi::ctypes::c_void;
use winapi::um::memoryapi::VirtualAlloc;
use winapi::um::winnt::{MEM_COMMIT, PAGE_NOACCESS};

// Revoke access to the previously accessible range.
unsafe {
self.as_mut_ptr().write_bytes(0, self.accessible_len);
if VirtualAlloc(
self.as_ptr() as *mut c_void,
self.accessible_len,
MEM_COMMIT,
PAGE_NOACCESS,
)
.is_null()
{
return Err(io::Error::last_os_error().to_string());
}
self.accessible_len = 0;
Ok(())
}
}

/// Return the allocated memory as a slice of u8.
pub fn as_slice(&self) -> &[u8] {
unsafe { slice::from_raw_parts(self.as_ptr(), self.len) }
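With `accessible_len` tracked on the struct, `make_accessible` now takes an absolute target length rather than a `(start, len)` range: it only changes the protection of the pages between the old and new accessible lengths, and requests that do not extend the range are no-ops. This is what lets the grow path in `linear_memory.rs` simply pass `new_bytes`, and what `reset` relies on to know how much to zero. A minimal lifecycle sketch (page counts illustrative):

```rust
use near_vm_vm::Mmap;

fn mmap_lifecycle() -> Result<(), String> {
    let page = region::page::size();

    // Reserve 16 pages of address space, commit the first 2.
    let mut mmap = Mmap::accessible_reserved(2 * page, 16 * page)?;

    // Grow to 4 accessible pages: only pages 2..4 change protection.
    mmap.make_accessible(4 * page)?;

    // Shrinking requests are no-ops: accessible_len stays at 4 pages.
    mmap.make_accessible(3 * page)?;

    // Zero everything that was accessible and revoke access; the address
    // space reservation survives, ready for the next LinearMemory.
    mmap.reset()?;
    Ok(())
}
```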
