From 9649219578a3f2de07f8a4e2d7beabc712f5f279 Mon Sep 17 00:00:00 2001 From: losfair Date: Fri, 8 Feb 2019 23:56:14 +0800 Subject: [PATCH 001/100] Initial work on WebAssembly parser for Dynasm backend. --- Cargo.lock | 8 + Cargo.toml | 2 +- cranelift | 1 + lib/dynasm-backend/Cargo.toml | 12 ++ lib/dynasm-backend/src/lib.rs | 302 ++++++++++++++++++++++++++++++++ lib/runtime-core/src/backend.rs | 1 + lib/runtime-core/src/error.rs | 9 + lib/runtime-core/src/types.rs | 15 ++ 8 files changed, 349 insertions(+), 1 deletion(-) create mode 160000 cranelift create mode 100644 lib/dynasm-backend/Cargo.toml create mode 100644 lib/dynasm-backend/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index d061df27bca..59514caff59 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -821,6 +821,14 @@ dependencies = [ "wasmparser 0.23.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "wasmer-dynasm-backend" +version = "0.1.0" +dependencies = [ + "wasmer-runtime-core 0.1.2", + "wasmparser 0.23.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "wasmer-emscripten" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index b570c9b0901..fa0e68a0012 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -27,7 +27,7 @@ wasmer-runtime-core = { path = "lib/runtime-core" } wasmer-emscripten = { path = "lib/emscripten" } [workspace] -members = ["lib/clif-backend", "lib/runtime", "lib/runtime-core", "lib/emscripten", "lib/spectests"] +members = ["lib/clif-backend", "lib/dynasm-backend", "lib/runtime", "lib/runtime-core", "lib/emscripten", "lib/spectests"] [build-dependencies] wabt = "0.7.2" diff --git a/cranelift b/cranelift new file mode 160000 index 00000000000..cb62a1ead2c --- /dev/null +++ b/cranelift @@ -0,0 +1 @@ +Subproject commit cb62a1ead2c5346ccb0f1224ecae5939ac064f87 diff --git a/lib/dynasm-backend/Cargo.toml b/lib/dynasm-backend/Cargo.toml new file mode 100644 index 00000000000..da6b2a9e57a --- /dev/null +++ b/lib/dynasm-backend/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "wasmer-dynasm-backend" +version = "0.1.0" +repository = "https://github.com/wasmerio/wasmer" +description = "Wasmer runtime Dynasm compiler backend" +license = "MIT" +authors = ["The Wasmer Engineering Team "] +edition = "2018" + +[dependencies] +wasmer-runtime-core = { path = "../runtime-core", version = "0.1.2" } +wasmparser = "0.23.0" diff --git a/lib/dynasm-backend/src/lib.rs b/lib/dynasm-backend/src/lib.rs new file mode 100644 index 00000000000..538cf50a9f4 --- /dev/null +++ b/lib/dynasm-backend/src/lib.rs @@ -0,0 +1,302 @@ +use std::ptr::NonNull; +use std::sync::Arc; +use wasmer_runtime_core::{ + backend::{Backend, Compiler, FuncResolver, ProtectedCaller, Token}, + error::{CompileError, CompileResult, RuntimeResult}, + module::{ + DataInitializer, ExportIndex, ImportName, ModuleInfo, ModuleInner, StringTable, + TableInitializer, + }, + structures::{Map, TypedIndex}, + types::{ + ElementType, FuncIndex, FuncSig, GlobalDescriptor, GlobalIndex, GlobalInit, + ImportedGlobalIndex, Initializer, LocalFuncIndex, MemoryDescriptor, MemoryIndex, + TableDescriptor, TableIndex, Type as CoreType, Value, + }, + units::Pages, + vm::{self, ImportBacking}, +}; +use wasmparser::{ + self, ExternalKind, FuncType, ImportSectionEntryType, InitExpr, MemoryType, ModuleReader, + Operator, SectionCode, TableType, Type, WasmDecoder, +}; + +struct Placeholder; + +impl FuncResolver for Placeholder { + fn get( + &self, + _module: &ModuleInner, + _local_func_index: LocalFuncIndex, + ) -> Option> { + None + } +} + 
+impl ProtectedCaller for Placeholder { + fn call( + &self, + _module: &ModuleInner, + _func_index: FuncIndex, + _params: &[Value], + _import_backing: &ImportBacking, + _vmctx: *mut vm::Ctx, + _: Token, + ) -> RuntimeResult> { + Ok(vec![]) + } +} + +pub struct DynasmCompiler {} + +impl DynasmCompiler { + pub fn new() -> DynasmCompiler { + DynasmCompiler {} + } +} + +impl Compiler for DynasmCompiler { + fn compile(&self, wasm: &[u8], _: Token) -> CompileResult { + validate(wasm)?; + + let mut reader = ModuleReader::new(wasm)?; + let mut m = ModuleInner { + // this is a placeholder + func_resolver: Box::new(Placeholder), + protected_caller: Box::new(Placeholder), + + info: ModuleInfo { + memories: Map::new(), + globals: Map::new(), + tables: Map::new(), + + imported_functions: Map::new(), + imported_memories: Map::new(), + imported_tables: Map::new(), + imported_globals: Map::new(), + + exports: Default::default(), + + data_initializers: Vec::new(), + elem_initializers: Vec::new(), + + start_func: None, + + func_assoc: Map::new(), + signatures: Map::new(), + backend: Backend::Cranelift, + + namespace_table: StringTable::new(), + name_table: StringTable::new(), + }, + }; + let mut types: Vec = Vec::new(); + + loop { + if reader.eof() { + return Ok(m); + } + let section = reader.read()?; + match section.code { + SectionCode::Custom { .. } => {} + SectionCode::Type => { + let mut ty_reader = section.get_type_section_reader()?; + let count = ty_reader.get_count(); + for _ in 0..count { + types.push(ty_reader.read()?); + } + } + SectionCode::Import => { + let mut imp_reader = section.get_import_section_reader()?; + let count = imp_reader.get_count(); + for _ in 0..count { + let imp = imp_reader.read()?; + // FIXME: not implemented + } + } + SectionCode::Function => { + let mut func_reader = section.get_function_section_reader()?; + let count = func_reader.get_count(); + for _ in 0..count { + let ty_id = func_reader.read()? 
as usize; + m.info.signatures.push(Arc::new(FuncSig::new( + types[ty_id] + .params + .iter() + .cloned() + .map(CoreType::from_wasmparser_type) + .collect::>>()?, + types[ty_id] + .returns + .iter() + .cloned() + .map(CoreType::from_wasmparser_type) + .collect::>>()?, + ))); + } + } + SectionCode::Table => { + let mut table_reader = section.get_table_section_reader()?; + let count = table_reader.get_count(); + for _ in 0..count { + let tt = table_reader.read()?; + if tt.element_type != Type::AnyFunc { + return Err(CompileError::InternalError { + msg: "unsupported table element type".into(), + }); + } + m.info.tables.push(TableDescriptor { + element: ElementType::Anyfunc, + minimum: tt.limits.initial, + maximum: tt.limits.maximum, + }); + } + } + SectionCode::Memory => { + let mut mem_reader = section.get_memory_section_reader()?; + let count = mem_reader.get_count(); + for _ in 0..count { + let mem_info = mem_reader.read()?; + m.info.memories.push(MemoryDescriptor { + minimum: Pages(mem_info.limits.initial), + maximum: mem_info.limits.maximum.map(Pages), + shared: mem_info.shared, + }); + } + } + SectionCode::Global => { + let mut global_reader = section.get_global_section_reader()?; + let count = global_reader.get_count(); + for _ in 0..count { + let info = global_reader.read()?; + m.info.globals.push(GlobalInit { + desc: GlobalDescriptor { + mutable: info.ty.mutable, + ty: CoreType::from_wasmparser_type(info.ty.content_type)?, + }, + init: eval_init_expr(&info.init_expr)?, + }); + } + } + SectionCode::Export => { + let mut export_reader = section.get_export_section_reader()?; + let count = export_reader.get_count(); + for _ in 0..count { + let v = export_reader.read()?; + m.info.exports.insert( + match ::std::str::from_utf8(v.field) { + Ok(x) => x.to_string(), + Err(_) => { + return Err(CompileError::InternalError { + msg: "field name not in utf-8".into(), + }) + } + }, + match v.kind { + ExternalKind::Function => { + ExportIndex::Func(FuncIndex::new(v.index as usize)) + } + ExternalKind::Global => { + ExportIndex::Global(GlobalIndex::new(v.index as usize)) + } + ExternalKind::Memory => { + ExportIndex::Memory(MemoryIndex::new(v.index as usize)) + } + ExternalKind::Table => { + ExportIndex::Table(TableIndex::new(v.index as usize)) + } + }, + ); + } + } + SectionCode::Start => { + m.info.start_func = + Some(FuncIndex::new(section.get_start_section_content()? as usize)); + } + SectionCode::Element => { + let mut element_reader = section.get_element_section_reader()?; + let count = element_reader.get_count(); + for _ in 0..count { + let elem = element_reader.read()?; + let table_index = elem.table_index as usize; + + let mut item_reader = elem.items.get_items_reader()?; + let item_count = item_reader.get_count() as usize; + + m.info.elem_initializers.push(TableInitializer { + table_index: TableIndex::new(table_index), + base: eval_init_expr(&elem.init_expr)?, + elements: (0..item_count) + .map(|_| Ok(FuncIndex::new(item_reader.read()? 
as usize))) + .collect::>()?, + }); + } + } + SectionCode::Code => { + let mut code_reader = section.get_code_section_reader()?; + let count = code_reader.get_count() as usize; + + if count != m.info.signatures.len() { + return Err(CompileError::InternalError { + msg: "len(function_bodies) != len(functions)".into(), + }); + } + + for i in 0..count { + let body = code_reader.read()?; + // FIXME: not implemented + } + } + SectionCode::Data => { + let mut data_reader = section.get_data_section_reader()?; + let count = data_reader.get_count(); + for _ in 0..count { + let initializer = data_reader.read()?; + m.info.data_initializers.push(DataInitializer { + memory_index: MemoryIndex::new(initializer.memory_index as usize), + base: eval_init_expr(&initializer.init_expr)?, + data: initializer.data.to_vec(), + }); + } + } + } + } + } +} + +fn validate(bytes: &[u8]) -> CompileResult<()> { + let mut parser = wasmparser::ValidatingParser::new(bytes, None); + loop { + let state = parser.read(); + match *state { + wasmparser::ParserState::EndWasm => break Ok(()), + wasmparser::ParserState::Error(err) => Err(CompileError::ValidationError { + msg: err.message.to_string(), + })?, + _ => {} + } + } +} + +fn eval_init_expr(expr: &InitExpr) -> CompileResult { + let mut reader = expr.get_operators_reader(); + let op = reader.read()?; + Ok(match op { + Operator::GetGlobal { global_index } => { + Initializer::GetGlobal(ImportedGlobalIndex::new(global_index as usize)) + } + Operator::I32Const { value } => Initializer::Const(Value::I32(value)), + Operator::I64Const { value } => Initializer::Const(Value::I64(value)), + Operator::F32Const { value } => { + Initializer::Const(Value::F32(unsafe { ::std::mem::transmute(value.bits()) })) + } + Operator::F64Const { value } => { + Initializer::Const(Value::F64(unsafe { ::std::mem::transmute(value.bits()) })) + } + _ => { + return Err(CompileError::InternalError { + msg: "init expr evaluation failed: unsupported opcode".into(), + }) + } + }) +} diff --git a/lib/runtime-core/src/backend.rs b/lib/runtime-core/src/backend.rs index 8fef9bee36b..12e93534e44 100644 --- a/lib/runtime-core/src/backend.rs +++ b/lib/runtime-core/src/backend.rs @@ -23,6 +23,7 @@ pub use crate::sig_registry::SigRegistry; #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum Backend { Cranelift, + Dynasm, } /// This type cannot be constructed from diff --git a/lib/runtime-core/src/error.rs b/lib/runtime-core/src/error.rs index 60d6175ee7b..523929e9e9c 100644 --- a/lib/runtime-core/src/error.rs +++ b/lib/runtime-core/src/error.rs @@ -2,6 +2,7 @@ use crate::types::{ FuncSig, GlobalDescriptor, MemoryDescriptor, MemoryIndex, TableDescriptor, TableIndex, Type, }; use std::sync::Arc; +use wasmparser::BinaryReaderError; pub type Result = std::result::Result; pub type CompileResult = std::result::Result; @@ -21,6 +22,14 @@ pub enum CompileError { InternalError { msg: String }, } +impl From for CompileError { + fn from(other: BinaryReaderError) -> CompileError { + CompileError::InternalError { + msg: format!("{:?}", other), + } + } +} + impl PartialEq for CompileError { fn eq(&self, _other: &CompileError) -> bool { false diff --git a/lib/runtime-core/src/types.rs b/lib/runtime-core/src/types.rs index c6b3f0a2bbd..d30b1ecb265 100644 --- a/lib/runtime-core/src/types.rs +++ b/lib/runtime-core/src/types.rs @@ -1,3 +1,4 @@ +use crate::error::{CompileError, CompileResult}; use crate::{memory::MemoryType, module::ModuleInner, structures::TypedIndex, units::Pages}; use std::{borrow::Cow, mem}; @@ -15,6 +16,20 @@ pub 
enum Type { F64, } +impl Type { + pub fn from_wasmparser_type(other: ::wasmparser::Type) -> CompileResult { + use wasmparser::Type as WPType; + match other { + WPType::I32 => Ok(Type::I32), + WPType::I64 => Ok(Type::I64), + WPType::F32 => Ok(Type::F32), + WPType::F64 => Ok(Type::F64), + _ => Err(CompileError::ValidationError { + msg: "type cannot be converted into a core type".into(), + }), + } + } +} /// Represents a WebAssembly value. /// /// As the number of types in WebAssembly expand, From af19f5c097b4d07682c339784918db5321635737 Mon Sep 17 00:00:00 2001 From: losfair Date: Tue, 12 Feb 2019 00:51:26 +0800 Subject: [PATCH 002/100] Update dependencies. --- Cargo.lock | 67 ++++++++++++++++++++++++++++++++--- lib/dynasm-backend/Cargo.toml | 6 ++-- 2 files changed, 67 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 59514caff59..8f1bd4cf7ea 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -28,7 +28,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "block-buffer" -version = "0.7.2" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "block-padding 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", @@ -178,6 +178,30 @@ dependencies = [ "generic-array 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "dynasm" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "owning_ref 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 0.4.26 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 0.6.11 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 0.15.26 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "dynasmrt" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "memmap 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", + "take_mut 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "errno" version = "0.2.4" @@ -327,6 +351,15 @@ dependencies = [ "cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "memmap" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.48 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "memmap" version = "0.7.0" @@ -365,6 +398,14 @@ name = "opaque-debug" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "owning_ref" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "stable_deref_trait 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "owning_ref" version = "0.4.0" @@ -625,7 +666,7 @@ name = "sha2" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "block-buffer 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", + "block-buffer 0.7.0 
(registry+https://github.com/rust-lang/crates.io-index)", "digest 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", "fake-simd 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "opaque-debug 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -690,6 +731,11 @@ dependencies = [ "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "take_mut" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "target-lexicon" version = "0.2.0" @@ -825,8 +871,10 @@ dependencies = [ name = "wasmer-dynasm-backend" version = "0.1.0" dependencies = [ + "dynasm 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "dynasmrt 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "wasmer-runtime-core 0.1.2", - "wasmparser 0.23.0 (registry+https://github.com/rust-lang/crates.io-index)", + "wasmparser 0.28.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -894,6 +942,11 @@ name = "wasmparser" version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "wasmparser" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "winapi" version = "0.2.8" @@ -928,7 +981,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum atty 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "9a7d5b8723950951411ee34d271d99dddcc2035a16ab25310ea2c8cfd4369652" "checksum autocfg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a6d640bee2da49f60a4068a7fae53acde8982514ab7bae8b8cea9e88cbcfd799" "checksum bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "228047a76f468627ca71776ecdebd732a3423081fcf5125585bcd7c49886ce12" -"checksum block-buffer 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "509de513cca6d92b6aacf9c61acfe7eaa160837323a81068d690cc1f8e5740da" +"checksum block-buffer 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "49665c62e0e700857531fa5d3763e91b539ff1abeebd56808d378b495870d60d" "checksum block-padding 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "d75255892aeb580d3c566f213a2b6fdc1c66667839f45719ee1d30ebf2aea591" "checksum byte-tools 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" "checksum byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a019b10a2a7cdeb292db131fc8113e57ea2a908f6e7894b0c3c671893b65dbeb" @@ -946,6 +999,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum cranelift-native 0.26.0 (registry+https://github.com/rust-lang/crates.io-index)" = "474bee81d620a473bf43411a3d6f10ffbf7965141dc5e5b76d8d2151dde3285d" "checksum cranelift-wasm 0.26.0 (registry+https://github.com/rust-lang/crates.io-index)" = "49723365dab9a48b354bdc24cb6d9d5719bc1d3b858ffd2ea179d0d7d885804a" "checksum digest 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "05f47366984d3ad862010e22c7ce81a7dbcaebbdfb37241a620f8b6596ee135c" +"checksum dynasm 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6aa959a3e40b6c4292053b150062d2abd795b3285c58d4be9b9ad3ef2264a26f" +"checksum dynasmrt 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a4c408a211e7f5762829f5e46bdff0c14bc3b1517a21a4bb781c716bf88b0c68" "checksum errno 0.2.4 
(registry+https://github.com/rust-lang/crates.io-index)" = "c2a071601ed01b988f896ab14b95e67335d1eeb50190932a1320f7fe3cadc84e" "checksum errno-dragonfly 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "14ca354e36190500e1e1fb267c647932382b54053c50b14970856c0b00a35067" "checksum failure 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "795bd83d3abeb9220f257e597aa0080a508b27533824adf336529648f6abf7e2" @@ -967,10 +1022,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum libc 0.2.48 (registry+https://github.com/rust-lang/crates.io-index)" = "e962c7641008ac010fa60a7dfdc1712449f29c44ef2d4702394aea943ee75047" "checksum lock_api 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "62ebf1391f6acad60e5c8b43706dde4582df75c06698ab44511d15016bc2442c" "checksum log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c84ec4b527950aa83a329754b01dbe3f58361d1c5efacd1f6d68c494d08a17c6" +"checksum memmap 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e2ffa2c986de11a9df78620c01eeaaf27d94d3ff02bf81bfcca953102dd0c6ff" "checksum memmap 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b" "checksum nix 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "921f61dc817b379d0834e45d5ec45beaacfae97082090a49c2cf30dcbc30206f" "checksum nix 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)" = "46f0f3210768d796e8fa79ec70ee6af172dacbe7147f5e69be5240a47778302b" "checksum opaque-debug 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "93f5bb2e8e8dec81642920ccff6b61f1eb94fa3020c5a325c9851ff604152409" +"checksum owning_ref 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "cdf84f41639e037b484f93433aa3897863b561ed65c6e59c7073d7c561710f37" "checksum owning_ref 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "49a4b8ea2179e6a2e27411d3bca09ca6dd630821cf6894c6c7c8467a8ee7ef13" "checksum page_size 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f89ef58b3d32420dbd1a43d2f38ae92f6239ef12bb556ab09ca55445f5a67242" "checksum parking_lot 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ab41b4aed082705d1056416ae4468b6ea99d52599ecf3169b00088d43113e337" @@ -1009,6 +1066,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum structopt-derive 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "ef98172b1a00b0bec738508d3726540edcbd186d50dfd326f2b1febbb3559f04" "checksum syn 0.15.26 (registry+https://github.com/rust-lang/crates.io-index)" = "f92e629aa1d9c827b2bb8297046c1ccffc57c99b947a680d3ccff1f136a3bee9" "checksum synstructure 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "73687139bf99285483c96ac0add482c3776528beac1d97d444f6e91f203a2015" +"checksum take_mut 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f764005d11ee5f36500a149ace24e00e3da98b0158b3e2d53a7495660d3f4d60" "checksum target-lexicon 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4af5e2227f0b887d591d3724b796a96eff04226104d872f5b3883fcd427d64b9" "checksum termion 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "689a3bdfaab439fd92bc87df5c4c78417d3cbe537487274e9b0b2dce76e92096" "checksum textwrap 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "307686869c93e71f94da64286f9a9524c0f308a9e1c87a583de8e9c9039ad3f6" @@ -1024,6 +1082,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" "checksum wabt-sys 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)" = "a6265b25719e82598d104b3717375e37661d41753e2c84cde3f51050c7ed7e3c" "checksum wasmparser 0.22.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f46e666ecb4a406483a59a49f9d0c17f327e70da53a128eccddae2eadb95865c" "checksum wasmparser 0.23.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b5e01c420bc7d36e778bd242e1167b079562ba8b34087122cc9057187026d060" +"checksum wasmparser 0.28.0 (registry+https://github.com/rust-lang/crates.io-index)" = "40f426b1929bd26517fb10702e2a8e520d1845c49567aa4d244f426f10b206c1" "checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" "checksum winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "92c1eb33641e276cfa214a0522acad57be5c56b10cb348b3c5117db75f3ac4b0" "checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" diff --git a/lib/dynasm-backend/Cargo.toml b/lib/dynasm-backend/Cargo.toml index da6b2a9e57a..005e232bb1d 100644 --- a/lib/dynasm-backend/Cargo.toml +++ b/lib/dynasm-backend/Cargo.toml @@ -8,5 +8,7 @@ authors = ["The Wasmer Engineering Team "] edition = "2018" [dependencies] -wasmer-runtime-core = { path = "../runtime-core", version = "0.1.2" } -wasmparser = "0.23.0" +wasmer-runtime-core = { path = "../runtime-core" } +wasmparser = "0.28.0" +dynasm = "0.3.0" +dynasmrt = "0.3.1" From bbb27bedbe4e237f212aa165b5287dbb73df8746 Mon Sep 17 00:00:00 2001 From: losfair Date: Tue, 12 Feb 2019 00:51:49 +0800 Subject: [PATCH 003/100] Code generation framework. 
---
 lib/dynasm-backend/src/codegen.rs |  17 ++
 lib/dynasm-backend/src/parse.rs   | 395 ++++++++++++++++++++++++++++++
 2 files changed, 412 insertions(+)
 create mode 100644 lib/dynasm-backend/src/codegen.rs
 create mode 100644 lib/dynasm-backend/src/parse.rs

diff --git a/lib/dynasm-backend/src/codegen.rs b/lib/dynasm-backend/src/codegen.rs
new file mode 100644
index 00000000000..e1d690ddd95
--- /dev/null
+++ b/lib/dynasm-backend/src/codegen.rs
@@ -0,0 +1,17 @@
+use wasmparser::{Operator, Type as WpType};
+
+pub trait ModuleCodeGenerator<FCG: FunctionCodeGenerator> {
+    fn next_function(&mut self) -> Result<&mut FCG, CodegenError>;
+}
+
+pub trait FunctionCodeGenerator {
+    fn feed_param(&mut self, ty: WpType) -> Result<(), CodegenError>;
+    fn feed_local(&mut self, ty: WpType, n: usize) -> Result<(), CodegenError>;
+    fn feed_opcode(&mut self, op: Operator) -> Result<(), CodegenError>;
+    fn finalize(&mut self) -> Result<(), CodegenError>;
+}
+
+#[derive(Debug)]
+pub struct CodegenError {
+    pub message: &'static str,
+}
diff --git a/lib/dynasm-backend/src/parse.rs b/lib/dynasm-backend/src/parse.rs
new file mode 100644
index 00000000000..32fa18ee063
--- /dev/null
+++ b/lib/dynasm-backend/src/parse.rs
@@ -0,0 +1,395 @@
+use crate::codegen::{CodegenError, FunctionCodeGenerator, ModuleCodeGenerator};
+use std::sync::Arc;
+use wasmer_runtime_core::{
+    backend::Backend,
+    module::{
+        DataInitializer, ExportIndex, ImportName, ModuleInfo, StringTable, StringTableBuilder,
+        TableInitializer,
+    },
+    structures::{Map, TypedIndex},
+    types::{
+        ElementType, FuncIndex, FuncSig, GlobalDescriptor, GlobalIndex, GlobalInit,
+        ImportedGlobalIndex, Initializer, MemoryDescriptor, MemoryIndex, SigIndex, TableDescriptor,
+        TableIndex, Type, Value,
+    },
+    units::Pages,
+};
+use wasmparser::{
+    BinaryReaderError, CodeSectionReader, Data, DataKind, Element, ElementKind, Export,
+    ExternalKind, FuncType, Import, ImportSectionEntryType, InitExpr, ModuleReader, Operator,
+    SectionCode, Type as WpType,
+};
+
+#[derive(Debug)]
+pub enum LoadError {
+    Parse(BinaryReaderError),
+    Codegen(CodegenError),
+}
+
+impl From<BinaryReaderError> for LoadError {
+    fn from(other: BinaryReaderError) -> LoadError {
+        LoadError::Parse(other)
+    }
+}
+
+impl From<CodegenError> for LoadError {
+    fn from(other: CodegenError) -> LoadError {
+        LoadError::Codegen(other)
+    }
+}
+
+pub fn read_module<MCG: ModuleCodeGenerator<FCG>, FCG: FunctionCodeGenerator>(
+    wasm: &[u8],
+    backend: Backend,
+    mut mcg: MCG,
+) -> Result<ModuleInfo, LoadError> {
+    let mut info = ModuleInfo {
+        memories: Map::new(),
+        globals: Map::new(),
+        tables: Map::new(),
+
+        imported_functions: Map::new(),
+        imported_memories: Map::new(),
+        imported_tables: Map::new(),
+        imported_globals: Map::new(),
+
+        exports: Default::default(),
+
+        data_initializers: Vec::new(),
+        elem_initializers: Vec::new(),
+
+        start_func: None,
+
+        func_assoc: Map::new(),
+        signatures: Map::new(),
+        backend: backend,
+
+        namespace_table: StringTable::new(),
+        name_table: StringTable::new(),
+    };
+
+    let mut reader = ModuleReader::new(wasm)?;
+
+    loop {
+        if reader.eof() {
+            return Ok(info);
+        }
+
+        let section = reader.read()?;
+
+        match section.code {
+            SectionCode::Type => {
+                let type_reader = section.get_type_section_reader()?;
+
+                for ty in type_reader {
+                    let ty = ty?;
+                    info.signatures.push(Arc::new(func_type_to_func_sig(ty)?));
+                }
+            }
+            SectionCode::Import => {
+                let import_reader = section.get_import_section_reader()?;
+                let mut namespace_builder = StringTableBuilder::new();
+                let mut name_builder = StringTableBuilder::new();
+
+                for import in import_reader {
+                    let Import { module, field, ty } = import?;
+ let namespace_index = namespace_builder.register(module); + let name_index = name_builder.register(field); + let import_name = ImportName { + namespace_index, + name_index, + }; + + match ty { + ImportSectionEntryType::Function(sigindex) => { + let sigindex = SigIndex::new(sigindex as usize); + info.imported_functions.push(import_name); + info.func_assoc.push(sigindex); + } + ImportSectionEntryType::Table(table_ty) => { + assert_eq!(table_ty.element_type, WpType::AnyFunc); + let table_desc = TableDescriptor { + element: ElementType::Anyfunc, + minimum: table_ty.limits.initial, + maximum: table_ty.limits.maximum, + }; + + info.imported_tables.push((import_name, table_desc)); + } + ImportSectionEntryType::Memory(memory_ty) => { + let mem_desc = MemoryDescriptor { + minimum: Pages(memory_ty.limits.initial), + maximum: memory_ty.limits.maximum.map(|max| Pages(max)), + shared: memory_ty.shared, + }; + info.imported_memories.push((import_name, mem_desc)); + } + ImportSectionEntryType::Global(global_ty) => { + let global_desc = GlobalDescriptor { + mutable: global_ty.mutable, + ty: wp_type_to_type(global_ty.content_type)?, + }; + info.imported_globals.push((import_name, global_desc)); + } + } + } + } + SectionCode::Function => { + let func_decl_reader = section.get_function_section_reader()?; + + for sigindex in func_decl_reader { + let sigindex = sigindex?; + + let sigindex = SigIndex::new(sigindex as usize); + info.func_assoc.push(sigindex); + } + } + SectionCode::Table => { + let table_decl_reader = section.get_table_section_reader()?; + + for table_ty in table_decl_reader { + let table_ty = table_ty?; + + let table_desc = TableDescriptor { + element: ElementType::Anyfunc, + minimum: table_ty.limits.initial, + maximum: table_ty.limits.maximum, + }; + + info.tables.push(table_desc); + } + } + SectionCode::Memory => { + let mem_decl_reader = section.get_memory_section_reader()?; + + for memory_ty in mem_decl_reader { + let memory_ty = memory_ty?; + + let mem_desc = MemoryDescriptor { + minimum: Pages(memory_ty.limits.initial), + maximum: memory_ty.limits.maximum.map(|max| Pages(max)), + shared: memory_ty.shared, + }; + + info.memories.push(mem_desc); + } + } + SectionCode::Global => { + let global_decl_reader = section.get_global_section_reader()?; + + for global in global_decl_reader { + let global = global?; + + let desc = GlobalDescriptor { + mutable: global.ty.mutable, + ty: wp_type_to_type(global.ty.content_type)?, + }; + + let global_init = GlobalInit { + desc, + init: eval_init_expr(&global.init_expr)?, + }; + + info.globals.push(global_init); + } + } + SectionCode::Export => { + let export_reader = section.get_export_section_reader()?; + + for export in export_reader { + let Export { field, kind, index } = export?; + + let export_index = match kind { + ExternalKind::Function => ExportIndex::Func(FuncIndex::new(index as usize)), + ExternalKind::Table => ExportIndex::Table(TableIndex::new(index as usize)), + ExternalKind::Memory => { + ExportIndex::Memory(MemoryIndex::new(index as usize)) + } + ExternalKind::Global => { + ExportIndex::Global(GlobalIndex::new(index as usize)) + } + }; + + info.exports.insert(field.to_string(), export_index); + } + } + SectionCode::Start => { + let start_index = section.get_start_section_content()?; + + info.start_func = Some(FuncIndex::new(start_index as usize)); + } + SectionCode::Element => { + let element_reader = section.get_element_section_reader()?; + + for element in element_reader { + let Element { kind, items } = element?; + + match kind { + 
ElementKind::Active { + table_index, + init_expr, + } => { + let table_index = TableIndex::new(table_index as usize); + let base = eval_init_expr(&init_expr)?; + let items_reader = items.get_items_reader()?; + + let elements: Vec<_> = items_reader + .into_iter() + .map(|res| res.map(|index| FuncIndex::new(index as usize))) + .collect::>()?; + + let table_init = TableInitializer { + table_index, + base, + elements, + }; + + info.elem_initializers.push(table_init); + } + ElementKind::Passive(_ty) => { + return Err(BinaryReaderError { + message: "passive tables are not yet supported", + offset: -1isize as usize, + } + .into()); + } + } + } + } + SectionCode::Code => { + let mut code_reader = section.get_code_section_reader()?; + if code_reader.get_count() as usize != info.func_assoc.len() { + return Err(BinaryReaderError { + message: "code_reader.get_count() != info.func_assoc.len()", + offset: ::std::usize::MAX, + } + .into()); + } + for i in 0..code_reader.get_count() { + let item = code_reader.read()?; + let mut fcg = mcg.next_function()?; + for param in info + .signatures + .get(*info.func_assoc.get(FuncIndex::new(i as usize)).unwrap()) + .unwrap() + .params() + { + fcg.feed_param(type_to_wp_type(*param))?; + } + for local in item.get_locals_reader()? { + let (count, ty) = local?; + fcg.feed_local(ty, count as usize)?; + } + for op in item.get_operators_reader()? { + let op = op?; + fcg.feed_opcode(op)?; + } + fcg.finalize()?; + } + } + SectionCode::Data => { + let data_reader = section.get_data_section_reader()?; + + for data in data_reader { + let Data { kind, data } = data?; + + match kind { + DataKind::Active { + memory_index, + init_expr, + } => { + let memory_index = MemoryIndex::new(memory_index as usize); + let base = eval_init_expr(&init_expr)?; + + let data_init = DataInitializer { + memory_index, + base, + data: data.to_vec(), + }; + + info.data_initializers.push(data_init); + } + DataKind::Passive => { + return Err(BinaryReaderError { + message: "passive memories are not yet supported", + offset: -1isize as usize, + } + .into()); + } + } + } + } + SectionCode::DataCount => {} + SectionCode::Custom { .. 
} => {}
+        }
+    }
+}
+
+pub fn wp_type_to_type(ty: WpType) -> Result<Type, BinaryReaderError> {
+    Ok(match ty {
+        WpType::I32 => Type::I32,
+        WpType::I64 => Type::I64,
+        WpType::F32 => Type::F32,
+        WpType::F64 => Type::F64,
+        WpType::V128 => {
+            return Err(BinaryReaderError {
+                message: "the wasmer llvm backend does not yet support the simd extension",
+                offset: -1isize as usize,
+            });
+        }
+        _ => panic!("broken invariant, invalid type"),
+    })
+}
+
+pub fn type_to_wp_type(ty: Type) -> WpType {
+    match ty {
+        Type::I32 => WpType::I32,
+        Type::I64 => WpType::I64,
+        Type::F32 => WpType::F32,
+        Type::F64 => WpType::F64,
+    }
+}
+
+fn func_type_to_func_sig(func_ty: FuncType) -> Result<FuncSig, BinaryReaderError> {
+    assert_eq!(func_ty.form, WpType::Func);
+
+    Ok(FuncSig::new(
+        func_ty
+            .params
+            .iter()
+            .cloned()
+            .map(wp_type_to_type)
+            .collect::<Result<Vec<_>, _>>()?,
+        func_ty
+            .returns
+            .iter()
+            .cloned()
+            .map(wp_type_to_type)
+            .collect::<Result<Vec<_>, _>>()?,
+    ))
+}
+
+fn eval_init_expr(expr: &InitExpr) -> Result<Initializer, BinaryReaderError> {
+    let mut reader = expr.get_operators_reader();
+    let (op, offset) = reader.read_with_offset()?;
+    Ok(match op {
+        Operator::GetGlobal { global_index } => {
+            Initializer::GetGlobal(ImportedGlobalIndex::new(global_index as usize))
+        }
+        Operator::I32Const { value } => Initializer::Const(Value::I32(value)),
+        Operator::I64Const { value } => Initializer::Const(Value::I64(value)),
+        Operator::F32Const { value } => {
+            Initializer::Const(Value::F32(f32::from_bits(value.bits())))
+        }
+        Operator::F64Const { value } => {
+            Initializer::Const(Value::F64(f64::from_bits(value.bits())))
+        }
+        _ => {
+            return Err(BinaryReaderError {
+                message: "init expr evaluation failed: unsupported opcode",
+                offset,
+            });
+        }
+    })
+}
From ffc1bde3d815ef3f3620398df8b0349ae897df1f Mon Sep 17 00:00:00 2001
From: losfair
Date: Tue, 12 Feb 2019 00:52:01 +0800
Subject: [PATCH 004/100] Cleanup & add imports.
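
The old single-file parser in lib.rs is dropped in favour of the codegen /
codegen_x64 / parse modules, and the crate root now only wires up the dynasm
and dynasmrt macros (hence the nightly proc_macro_hygiene feature gate). For
orientation, the dynasm-rs workflow the following commits build on looks
roughly like the sketch below; jit_const_42 is a made-up example, assuming
the dynasmrt 0.3 API that codegen_x64.rs uses later (Assembler::new, offset,
finalize, ExecutableBuffer::ptr):

    use dynasmrt::{x64::Assembler, DynasmApi};

    fn jit_const_42() -> i32 {
        // Assemble `mov eax, 42; ret` into an executable buffer.
        let mut ops = Assembler::new().unwrap();
        let entry = ops.offset();
        dynasm!(ops
            ; mov eax, 42
            ; ret
        );
        let buf = ops.finalize().unwrap();
        // Turn the entry offset into a callable function pointer.
        let f: extern "C" fn() -> i32 = unsafe { ::std::mem::transmute(buf.ptr(entry)) };
        f()
    }
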
--- lib/dynasm-backend/src/lib.rs | 307 +--------------------------------- 1 file changed, 8 insertions(+), 299 deletions(-) diff --git a/lib/dynasm-backend/src/lib.rs b/lib/dynasm-backend/src/lib.rs index 538cf50a9f4..b421ad7f77b 100644 --- a/lib/dynasm-backend/src/lib.rs +++ b/lib/dynasm-backend/src/lib.rs @@ -1,302 +1,11 @@ -use std::ptr::NonNull; -use std::sync::Arc; -use wasmer_runtime_core::{ - backend::{Backend, Compiler, FuncResolver, ProtectedCaller, Token}, - error::{CompileError, CompileResult, RuntimeResult}, - module::{ - DataInitializer, ExportIndex, ImportName, ModuleInfo, ModuleInner, StringTable, - TableInitializer, - }, - structures::{Map, TypedIndex}, - types::{ - ElementType, FuncIndex, FuncSig, GlobalDescriptor, GlobalIndex, GlobalInit, - ImportedGlobalIndex, Initializer, LocalFuncIndex, MemoryDescriptor, MemoryIndex, - TableDescriptor, TableIndex, Type as CoreType, Value, - }, - units::Pages, - vm::{self, ImportBacking}, -}; -use wasmparser::{ - self, ExternalKind, FuncType, ImportSectionEntryType, InitExpr, MemoryType, ModuleReader, - Operator, SectionCode, TableType, Type, WasmDecoder, -}; +#![feature(proc_macro_hygiene)] -struct Placeholder; +#[macro_use] +extern crate dynasmrt; -impl FuncResolver for Placeholder { - fn get( - &self, - _module: &ModuleInner, - _local_func_index: LocalFuncIndex, - ) -> Option> { - None - } -} +#[macro_use] +extern crate dynasm; -impl ProtectedCaller for Placeholder { - fn call( - &self, - _module: &ModuleInner, - _func_index: FuncIndex, - _params: &[Value], - _import_backing: &ImportBacking, - _vmctx: *mut vm::Ctx, - _: Token, - ) -> RuntimeResult> { - Ok(vec![]) - } -} - -pub struct DynasmCompiler {} - -impl DynasmCompiler { - pub fn new() -> DynasmCompiler { - DynasmCompiler {} - } -} - -impl Compiler for DynasmCompiler { - fn compile(&self, wasm: &[u8], _: Token) -> CompileResult { - validate(wasm)?; - - let mut reader = ModuleReader::new(wasm)?; - let mut m = ModuleInner { - // this is a placeholder - func_resolver: Box::new(Placeholder), - protected_caller: Box::new(Placeholder), - - info: ModuleInfo { - memories: Map::new(), - globals: Map::new(), - tables: Map::new(), - - imported_functions: Map::new(), - imported_memories: Map::new(), - imported_tables: Map::new(), - imported_globals: Map::new(), - - exports: Default::default(), - - data_initializers: Vec::new(), - elem_initializers: Vec::new(), - - start_func: None, - - func_assoc: Map::new(), - signatures: Map::new(), - backend: Backend::Cranelift, - - namespace_table: StringTable::new(), - name_table: StringTable::new(), - }, - }; - let mut types: Vec = Vec::new(); - - loop { - if reader.eof() { - return Ok(m); - } - let section = reader.read()?; - match section.code { - SectionCode::Custom { .. } => {} - SectionCode::Type => { - let mut ty_reader = section.get_type_section_reader()?; - let count = ty_reader.get_count(); - for _ in 0..count { - types.push(ty_reader.read()?); - } - } - SectionCode::Import => { - let mut imp_reader = section.get_import_section_reader()?; - let count = imp_reader.get_count(); - for _ in 0..count { - let imp = imp_reader.read()?; - // FIXME: not implemented - } - } - SectionCode::Function => { - let mut func_reader = section.get_function_section_reader()?; - let count = func_reader.get_count(); - for _ in 0..count { - let ty_id = func_reader.read()? 
as usize; - m.info.signatures.push(Arc::new(FuncSig::new( - types[ty_id] - .params - .iter() - .cloned() - .map(CoreType::from_wasmparser_type) - .collect::>>()?, - types[ty_id] - .returns - .iter() - .cloned() - .map(CoreType::from_wasmparser_type) - .collect::>>()?, - ))); - } - } - SectionCode::Table => { - let mut table_reader = section.get_table_section_reader()?; - let count = table_reader.get_count(); - for _ in 0..count { - let tt = table_reader.read()?; - if tt.element_type != Type::AnyFunc { - return Err(CompileError::InternalError { - msg: "unsupported table element type".into(), - }); - } - m.info.tables.push(TableDescriptor { - element: ElementType::Anyfunc, - minimum: tt.limits.initial, - maximum: tt.limits.maximum, - }); - } - } - SectionCode::Memory => { - let mut mem_reader = section.get_memory_section_reader()?; - let count = mem_reader.get_count(); - for _ in 0..count { - let mem_info = mem_reader.read()?; - m.info.memories.push(MemoryDescriptor { - minimum: Pages(mem_info.limits.initial), - maximum: mem_info.limits.maximum.map(Pages), - shared: mem_info.shared, - }); - } - } - SectionCode::Global => { - let mut global_reader = section.get_global_section_reader()?; - let count = global_reader.get_count(); - for _ in 0..count { - let info = global_reader.read()?; - m.info.globals.push(GlobalInit { - desc: GlobalDescriptor { - mutable: info.ty.mutable, - ty: CoreType::from_wasmparser_type(info.ty.content_type)?, - }, - init: eval_init_expr(&info.init_expr)?, - }); - } - } - SectionCode::Export => { - let mut export_reader = section.get_export_section_reader()?; - let count = export_reader.get_count(); - for _ in 0..count { - let v = export_reader.read()?; - m.info.exports.insert( - match ::std::str::from_utf8(v.field) { - Ok(x) => x.to_string(), - Err(_) => { - return Err(CompileError::InternalError { - msg: "field name not in utf-8".into(), - }) - } - }, - match v.kind { - ExternalKind::Function => { - ExportIndex::Func(FuncIndex::new(v.index as usize)) - } - ExternalKind::Global => { - ExportIndex::Global(GlobalIndex::new(v.index as usize)) - } - ExternalKind::Memory => { - ExportIndex::Memory(MemoryIndex::new(v.index as usize)) - } - ExternalKind::Table => { - ExportIndex::Table(TableIndex::new(v.index as usize)) - } - }, - ); - } - } - SectionCode::Start => { - m.info.start_func = - Some(FuncIndex::new(section.get_start_section_content()? as usize)); - } - SectionCode::Element => { - let mut element_reader = section.get_element_section_reader()?; - let count = element_reader.get_count(); - for _ in 0..count { - let elem = element_reader.read()?; - let table_index = elem.table_index as usize; - - let mut item_reader = elem.items.get_items_reader()?; - let item_count = item_reader.get_count() as usize; - - m.info.elem_initializers.push(TableInitializer { - table_index: TableIndex::new(table_index), - base: eval_init_expr(&elem.init_expr)?, - elements: (0..item_count) - .map(|_| Ok(FuncIndex::new(item_reader.read()? 
as usize))) - .collect::>()?, - }); - } - } - SectionCode::Code => { - let mut code_reader = section.get_code_section_reader()?; - let count = code_reader.get_count() as usize; - - if count != m.info.signatures.len() { - return Err(CompileError::InternalError { - msg: "len(function_bodies) != len(functions)".into(), - }); - } - - for i in 0..count { - let body = code_reader.read()?; - // FIXME: not implemented - } - } - SectionCode::Data => { - let mut data_reader = section.get_data_section_reader()?; - let count = data_reader.get_count(); - for _ in 0..count { - let initializer = data_reader.read()?; - m.info.data_initializers.push(DataInitializer { - memory_index: MemoryIndex::new(initializer.memory_index as usize), - base: eval_init_expr(&initializer.init_expr)?, - data: initializer.data.to_vec(), - }); - } - } - } - } - } -} - -fn validate(bytes: &[u8]) -> CompileResult<()> { - let mut parser = wasmparser::ValidatingParser::new(bytes, None); - loop { - let state = parser.read(); - match *state { - wasmparser::ParserState::EndWasm => break Ok(()), - wasmparser::ParserState::Error(err) => Err(CompileError::ValidationError { - msg: err.message.to_string(), - })?, - _ => {} - } - } -} - -fn eval_init_expr(expr: &InitExpr) -> CompileResult { - let mut reader = expr.get_operators_reader(); - let op = reader.read()?; - Ok(match op { - Operator::GetGlobal { global_index } => { - Initializer::GetGlobal(ImportedGlobalIndex::new(global_index as usize)) - } - Operator::I32Const { value } => Initializer::Const(Value::I32(value)), - Operator::I64Const { value } => Initializer::Const(Value::I64(value)), - Operator::F32Const { value } => { - Initializer::Const(Value::F32(unsafe { ::std::mem::transmute(value.bits()) })) - } - Operator::F64Const { value } => { - Initializer::Const(Value::F64(unsafe { ::std::mem::transmute(value.bits()) })) - } - _ => { - return Err(CompileError::InternalError { - msg: "init expr evaluation failed: unsupported opcode".into(), - }) - } - }) -} +mod codegen; +mod codegen_x64; +mod parse; From 2fbb5e333212a680315a94b18e5f7e1f4ee6c18a Mon Sep 17 00:00:00 2001 From: losfair Date: Tue, 12 Feb 2019 00:52:17 +0800 Subject: [PATCH 005/100] Codegen backend for x64. 
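
First cut of the x64 code generator. One design detail worth noting: a single
dynasmrt Assembler is shared across the whole module, and every new
X64FunctionCode takes it over from the previous one, so all functions are
emitted into one buffer. Stripped of the other fields, the hand-off looks
roughly like this (FuncGen and ModuleGen are shortened stand-ins, not the
actual types added below):

    use dynasmrt::x64::Assembler;

    struct FuncGen {
        assembler: Option<Assembler>,
    }

    #[derive(Default)]
    struct ModuleGen {
        functions: Vec<FuncGen>,
    }

    impl ModuleGen {
        fn next_function(&mut self) -> &mut FuncGen {
            let assembler = match self.functions.last_mut() {
                // Steal the assembler back from the previous function...
                Some(prev) => prev.assembler.take().unwrap(),
                // ...or create it for the first one.
                None => Assembler::new().unwrap(),
            };
            self.functions.push(FuncGen {
                assembler: Some(assembler),
            });
            self.functions.last_mut().unwrap()
        }
    }

Opcode handling (feed_opcode) is still a stub at this point.
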
--- lib/dynasm-backend/src/codegen_x64.rs | 99 +++++++++++++++++++++++++++ 1 file changed, 99 insertions(+) create mode 100644 lib/dynasm-backend/src/codegen_x64.rs diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs new file mode 100644 index 00000000000..5675fac1be8 --- /dev/null +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -0,0 +1,99 @@ +use super::codegen::*; +use dynasmrt::{x64::Assembler, DynasmApi}; +use wasmparser::{Operator, Type as WpType}; + +#[derive(Default)] +pub struct X64ModuleCodeGenerator { + functions: Vec, +} + +pub struct X64FunctionCode { + assembler: Option, + locals: Vec, + current_stack_offset: usize, +} + +struct Local { + ty: WpType, + stack_offset: usize, +} + +impl X64ModuleCodeGenerator { + pub fn new() -> X64ModuleCodeGenerator { + X64ModuleCodeGenerator::default() + } +} + +impl ModuleCodeGenerator for X64ModuleCodeGenerator { + fn next_function(&mut self) -> Result<&mut X64FunctionCode, CodegenError> { + let code = X64FunctionCode { + assembler: Some(match self.functions.last_mut() { + Some(x) => x.assembler.take().unwrap(), + None => match Assembler::new() { + Ok(x) => x, + Err(_) => { + return Err(CodegenError { + message: "cannot initialize assembler", + }) + } + }, + }), + locals: vec![], + current_stack_offset: 0, + }; + self.functions.push(code); + Ok(self.functions.last_mut().unwrap()) + } +} + +impl FunctionCodeGenerator for X64FunctionCode { + fn feed_param(&mut self, ty: WpType) -> Result<(), CodegenError> { + let size = get_size_of_type(&ty)?; + self.current_stack_offset -= size; + self.locals.push(Local { + ty: ty, + stack_offset: self.current_stack_offset, + }); + // TODO: load parameter values onto stack... + Ok(()) + } + fn feed_local(&mut self, ty: WpType, n: usize) -> Result<(), CodegenError> { + let size = get_size_of_type(&ty)?; + let assembler = self.assembler.as_mut().unwrap(); + + dynasm!( + assembler + ; xor rax, rax + ); + + for _ in 0..n { + // FIXME: check range of n + self.current_stack_offset -= size; + self.locals.push(Local { + ty: ty, + stack_offset: self.current_stack_offset, + }); + dynasm!( + assembler + ; mov [rsp - (self.current_stack_offset as i32)], rax + ); + } + Ok(()) + } + fn feed_opcode(&mut self, op: Operator) -> Result<(), CodegenError> { + Ok(()) + } + fn finalize(&mut self) -> Result<(), CodegenError> { + Ok(()) + } +} + +fn get_size_of_type(ty: &WpType) -> Result { + match *ty { + WpType::I32 | WpType::F32 => Ok(4), + WpType::I64 | WpType::F64 => Ok(8), + _ => Err(CodegenError { + message: "unknown type", + }), + } +} From 4ebb22f8bc00956fb670d290449ffc338450cfba Mon Sep 17 00:00:00 2001 From: losfair Date: Tue, 12 Feb 2019 23:15:57 +0800 Subject: [PATCH 006/100] Calling conventions, value stack, and runtime stack layout. 
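
Wasm operand-stack values are now tracked by a ValueStack, which gives each
pushed value either one of a fixed pool of scratch registers or a slot on the
machine stack once the registers run out; control frames record the label,
return types and value-stack depth to restore. A small illustration of the
location assignment (not part of the commit; num_regs is shrunk to 2 for
brevity, expected locations noted in the comments):

    use crate::stack::ValueStack;
    use wasmparser::Type as WpType;

    fn value_stack_demo() {
        let mut vs = ValueStack::new(2);
        vs.push(WpType::I32); // -> ValueLocation::Register(0)
        vs.push(WpType::I32); // -> ValueLocation::Register(1)
        vs.push(WpType::I64); // -> ValueLocation::Stack, registers exhausted
        vs.pop2();            // pops the top two values as a pair
    }
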
--- lib/dynasm-backend/src/codegen.rs | 1 + lib/dynasm-backend/src/codegen_x64.rs | 123 +++++++++++++++++++++----- lib/dynasm-backend/src/lib.rs | 1 + lib/dynasm-backend/src/parse.rs | 1 + lib/dynasm-backend/src/stack.rs | 109 +++++++++++++++++++++++ 5 files changed, 212 insertions(+), 23 deletions(-) create mode 100644 lib/dynasm-backend/src/stack.rs diff --git a/lib/dynasm-backend/src/codegen.rs b/lib/dynasm-backend/src/codegen.rs index e1d690ddd95..1f39e02f2c5 100644 --- a/lib/dynasm-backend/src/codegen.rs +++ b/lib/dynasm-backend/src/codegen.rs @@ -7,6 +7,7 @@ pub trait ModuleCodeGenerator { pub trait FunctionCodeGenerator { fn feed_param(&mut self, ty: WpType) -> Result<(), CodegenError>; fn feed_local(&mut self, ty: WpType, n: usize) -> Result<(), CodegenError>; + fn begin_body(&mut self) -> Result<(), CodegenError>; fn feed_opcode(&mut self, op: Operator) -> Result<(), CodegenError>; fn finalize(&mut self) -> Result<(), CodegenError>; } diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index 5675fac1be8..e866097f8d6 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -1,5 +1,6 @@ use super::codegen::*; -use dynasmrt::{x64::Assembler, DynasmApi}; +use super::stack::ValueStack; +use dynasmrt::{x64::Assembler, DynamicLabel, DynasmApi, DynasmLabelApi}; use wasmparser::{Operator, Type as WpType}; #[derive(Default)] @@ -8,11 +9,18 @@ pub struct X64ModuleCodeGenerator { } pub struct X64FunctionCode { + id: usize, + begin_label: DynamicLabel, + cleanup_label: DynamicLabel, assembler: Option, locals: Vec, + num_params: usize, current_stack_offset: usize, + callee_managed_stack_offset: usize, + value_stack: ValueStack, } +#[derive(Copy, Clone, Debug)] struct Local { ty: WpType, stack_offset: usize, @@ -26,20 +34,32 @@ impl X64ModuleCodeGenerator { impl ModuleCodeGenerator for X64ModuleCodeGenerator { fn next_function(&mut self) -> Result<&mut X64FunctionCode, CodegenError> { + let mut assembler = match self.functions.last_mut() { + Some(x) => x.assembler.take().unwrap(), + None => match Assembler::new() { + Ok(x) => x, + Err(_) => { + return Err(CodegenError { + message: "cannot initialize assembler", + }) + } + }, + }; + let begin_label = assembler.new_dynamic_label(); + dynasm!( + assembler + ; => begin_label + ); let code = X64FunctionCode { - assembler: Some(match self.functions.last_mut() { - Some(x) => x.assembler.take().unwrap(), - None => match Assembler::new() { - Ok(x) => x, - Err(_) => { - return Err(CodegenError { - message: "cannot initialize assembler", - }) - } - }, - }), + id: self.functions.len(), + begin_label: begin_label, + cleanup_label: assembler.new_dynamic_label(), + assembler: Some(assembler), locals: vec![], + num_params: 0, current_stack_offset: 0, + callee_managed_stack_offset: 0, + value_stack: ValueStack::new(13), }; self.functions.push(code); Ok(self.functions.last_mut().unwrap()) @@ -49,41 +69,98 @@ impl ModuleCodeGenerator for X64ModuleCodeGenerator { impl FunctionCodeGenerator for X64FunctionCode { fn feed_param(&mut self, ty: WpType) -> Result<(), CodegenError> { let size = get_size_of_type(&ty)?; - self.current_stack_offset -= size; + self.current_stack_offset += size; self.locals.push(Local { ty: ty, stack_offset: self.current_stack_offset, }); - // TODO: load parameter values onto stack... 
+ self.num_params += 1; Ok(()) } fn feed_local(&mut self, ty: WpType, n: usize) -> Result<(), CodegenError> { let size = get_size_of_type(&ty)?; - let assembler = self.assembler.as_mut().unwrap(); - - dynasm!( - assembler - ; xor rax, rax - ); - for _ in 0..n { // FIXME: check range of n - self.current_stack_offset -= size; + self.current_stack_offset += size; + self.callee_managed_stack_offset += size; self.locals.push(Local { ty: ty, stack_offset: self.current_stack_offset, }); + } + Ok(()) + } + fn begin_body(&mut self) -> Result<(), CodegenError> { + let assembler = self.assembler.as_mut().unwrap(); + dynasm!( + assembler + ; mov rax, rsp + ; sub rsp, self.callee_managed_stack_offset as i32 + ; xor rcx, rcx + ); + + for local in &self.locals[self.num_params..] { + let size = get_size_of_type(&local.ty)?; dynasm!( assembler - ; mov [rsp - (self.current_stack_offset as i32)], rax + ; sub rax, size as i32 ); + if size == 4 { + dynasm!( + assembler + ; mov [rax], ecx + ); + } else if size == 8 { + dynasm!( + assembler + ; mov [rax], rcx + ); + } else { + return Err(CodegenError { + message: "unsupported size for type", + }); + } } + dynasm!( + assembler + ; push rbp + ; mov rbp, rsp + ); Ok(()) } fn feed_opcode(&mut self, op: Operator) -> Result<(), CodegenError> { + let assembler = self.assembler.as_mut().unwrap(); + match op { + Operator::GetLocal { local_index } => { + let local_index = local_index as usize; + if local_index >= self.locals.len() { + return Err(CodegenError { + message: "local out of bounds", + }); + } + let local = self.locals[local_index]; + dynasm!( + assembler + ; mov rax, rbp + ; add rax, (self.current_stack_offset - local.stack_offset) as i32 + // TODO: How should we dynamically specify a register? + ); + } + _ => unimplemented!(), + } Ok(()) } fn finalize(&mut self) -> Result<(), CodegenError> { + let assembler = self.assembler.as_mut().unwrap(); + dynasm!( + assembler + ; ud2 + ; => self.cleanup_label + ; mov rsp, rbp + ; pop rbp + ; add rsp, self.current_stack_offset as i32 + ; ret + ); Ok(()) } } diff --git a/lib/dynasm-backend/src/lib.rs b/lib/dynasm-backend/src/lib.rs index b421ad7f77b..95aa18e8f55 100644 --- a/lib/dynasm-backend/src/lib.rs +++ b/lib/dynasm-backend/src/lib.rs @@ -9,3 +9,4 @@ extern crate dynasm; mod codegen; mod codegen_x64; mod parse; +mod stack; diff --git a/lib/dynasm-backend/src/parse.rs b/lib/dynasm-backend/src/parse.rs index 32fa18ee063..bc5ef9a3278 100644 --- a/lib/dynasm-backend/src/parse.rs +++ b/lib/dynasm-backend/src/parse.rs @@ -281,6 +281,7 @@ pub fn read_module, FCG: FunctionCodeGenerator>( let (count, ty) = local?; fcg.feed_local(ty, count as usize)?; } + fcg.begin_body()?; for op in item.get_operators_reader()? 
{ let op = op?; fcg.feed_opcode(op)?; diff --git a/lib/dynasm-backend/src/stack.rs b/lib/dynasm-backend/src/stack.rs new file mode 100644 index 00000000000..16937fc7c9a --- /dev/null +++ b/lib/dynasm-backend/src/stack.rs @@ -0,0 +1,109 @@ +use dynasmrt::DynamicLabel; +use wasmparser::Type as WpType; + +/*#[repr(u8)] +#[derive(Copy, Clone, Debug)] +pub enum RegisterName { + RDI, + RSI, + RDX, + RCX, + R8, + R9, + R10, + R11, + RBX, + R12, + R13, + R14, + R15, + Invalid, +}*/ + +#[derive(Debug)] +pub struct ControlFrame { + pub label: DynamicLabel, + pub loop_like: bool, + pub returns: Vec, + pub value_stack_depth_before: usize, +} + +#[derive(Debug)] +pub struct ControlStack { + pub frames: Vec, +} + +#[derive(Debug)] +pub struct ValueStack { + pub num_regs: u8, + pub values: Vec, +} + +#[derive(Copy, Clone, Debug)] +pub struct ValueInfo { + pub ty: WpType, + pub location: ValueLocation, +} + +#[derive(Copy, Clone, Debug)] +pub enum ValueLocation { + Register(u8), + Stack, +} + +impl ValueStack { + pub fn new(num_regs: u8) -> ValueStack { + ValueStack { + num_regs: num_regs, + values: vec![], + } + } + + fn next_location(&self, loc: &ValueLocation) -> ValueLocation { + match *loc { + ValueLocation::Register(x) => { + if x >= self.num_regs - 1 { + ValueLocation::Stack + } else { + ValueLocation::Register(x + 1) + } + } + ValueLocation::Stack => ValueLocation::Stack, + } + } + + pub fn push(&mut self, ty: WpType) -> ValueLocation { + let loc = self + .values + .last() + .map(|x| self.next_location(&x.location)) + .unwrap_or(ValueLocation::Register(0)); + self.values.push(ValueInfo { + ty: ty, + location: loc, + }); + loc + } + + pub fn pop(&mut self) -> Option { + self.values.pop() + } + + pub fn pop2(&mut self) -> Option<(ValueInfo, ValueInfo)> { + if self.values.len() < 2 { + None + } else { + let v2 = self.values.pop().unwrap(); + let v1 = self.values.pop().unwrap(); + Some((v1, v2)) + } + } + + pub fn peek(&self) -> Option { + self.values.last().cloned() + } + + pub fn reset_depth(&mut self, target_depth: usize) { + self.values.truncate(target_depth); + } +} From a69c5b4a14bc584dea45292b9139883eb9e42ded Mon Sep 17 00:00:00 2001 From: losfair Date: Wed, 13 Feb 2019 20:03:54 +0800 Subject: [PATCH 007/100] stack: Helper functions and unified return types --- lib/dynasm-backend/src/stack.rs | 47 ++++++++++++++++++++++++++++----- 1 file changed, 40 insertions(+), 7 deletions(-) diff --git a/lib/dynasm-backend/src/stack.rs b/lib/dynasm-backend/src/stack.rs index 16937fc7c9a..2ace6bd5105 100644 --- a/lib/dynasm-backend/src/stack.rs +++ b/lib/dynasm-backend/src/stack.rs @@ -1,3 +1,4 @@ +use crate::codegen::CodegenError; use dynasmrt::DynamicLabel; use wasmparser::Type as WpType; @@ -51,6 +52,26 @@ pub enum ValueLocation { Stack, } +impl ValueLocation { + pub fn is_register(&self) -> bool { + if let ValueLocation::Register(_) = *self { + true + } else { + false + } + } + + pub fn get_register(&self) -> Result { + if let ValueLocation::Register(id) = *self { + Ok(id) + } else { + Err(CodegenError { + message: "not a register location" + }) + } + } +} + impl ValueStack { pub fn new(num_regs: u8) -> ValueStack { ValueStack { @@ -85,22 +106,34 @@ impl ValueStack { loc } - pub fn pop(&mut self) -> Option { - self.values.pop() + pub fn pop(&mut self) -> Result { + match self.values.pop() { + Some(x) => Ok(x), + None => Err(CodegenError { + message: "no value on top of stack", + }), + } } - pub fn pop2(&mut self) -> Option<(ValueInfo, ValueInfo)> { + pub fn pop2(&mut self) -> Result<(ValueInfo, 
ValueInfo), CodegenError> { if self.values.len() < 2 { - None + Err(CodegenError { + message: "less than 2 values on top of stack", + }) } else { let v2 = self.values.pop().unwrap(); let v1 = self.values.pop().unwrap(); - Some((v1, v2)) + Ok((v1, v2)) } } - pub fn peek(&self) -> Option { - self.values.last().cloned() + pub fn peek(&self) -> Result { + match self.values.last().cloned() { + Some(x) => Ok(x), + None => Err(CodegenError { + message: "no value on top of stack", + }), + } } pub fn reset_depth(&mut self, target_depth: usize) { From 43df3dd5464045ede532589f7c3e023d9a1ba446 Mon Sep 17 00:00:00 2001 From: losfair Date: Wed, 13 Feb 2019 20:04:10 +0800 Subject: [PATCH 008/100] Use System V calling convention and implement a few opcodes. --- lib/dynasm-backend/src/codegen_x64.rs | 220 ++++++++++++++++++++------ 1 file changed, 173 insertions(+), 47 deletions(-) diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index e866097f8d6..e7808b0c4ab 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -1,8 +1,46 @@ use super::codegen::*; -use super::stack::ValueStack; +use super::stack::{ValueInfo, ValueLocation, ValueStack}; use dynasmrt::{x64::Assembler, DynamicLabel, DynasmApi, DynasmLabelApi}; use wasmparser::{Operator, Type as WpType}; +#[repr(u8)] +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +pub enum Register { + RAX, + RCX, + RDX, + RBX, + RSP, + RBP, + RSI, + RDI, + R8, + R9, + R10, + R11, + R12, + R13, + R14, + R15, +} + +impl Register { + pub fn from_scratch_reg(id: u8) -> Register { + use self::Register::*; + match id { + 0 => RDI, + 1 => RSI, + 2 => RDX, + 3 => RCX, + 4 => R8, + 5 => R9, + 6 => R10, + 7 => R11, + _ => unreachable!(), + } + } +} + #[derive(Default)] pub struct X64ModuleCodeGenerator { functions: Vec, @@ -16,7 +54,6 @@ pub struct X64FunctionCode { locals: Vec, num_params: usize, current_stack_offset: usize, - callee_managed_stack_offset: usize, value_stack: ValueStack, } @@ -49,6 +86,8 @@ impl ModuleCodeGenerator for X64ModuleCodeGenerator { dynasm!( assembler ; => begin_label + ; push rbp + ; mov rbp, rsp ); let code = X64FunctionCode { id: self.functions.len(), @@ -58,74 +97,99 @@ impl ModuleCodeGenerator for X64ModuleCodeGenerator { locals: vec![], num_params: 0, current_stack_offset: 0, - callee_managed_stack_offset: 0, - value_stack: ValueStack::new(13), + value_stack: ValueStack::new(8), }; self.functions.push(code); Ok(self.functions.last_mut().unwrap()) } } +impl X64FunctionCode { + fn gen_rt_pop(assembler: &mut Assembler, info: &ValueInfo) -> Result<(), CodegenError> { + match info.location { + ValueLocation::Register(_) => {} + ValueLocation::Stack => { + let size = get_size_of_type(&info.ty)?; + dynasm!( + assembler + ; add rsp, size as i32 + ); + } + } + Ok(()) + } +} + impl FunctionCodeGenerator for X64FunctionCode { fn feed_param(&mut self, ty: WpType) -> Result<(), CodegenError> { + let assembler = self.assembler.as_mut().unwrap(); let size = get_size_of_type(&ty)?; + self.current_stack_offset += size; self.locals.push(Local { ty: ty, stack_offset: self.current_stack_offset, }); + + let param_reg = match self.num_params { + 0 => Register::RDI, + 1 => Register::RSI, + 2 => Register::RDX, + 3 => Register::RCX, + 4 => Register::R8, + 5 => Register::R9, + _ => { + return Err(CodegenError { + message: "more than 6 function parameters is not yet supported", + }) + } + }; self.num_params += 1; + + if is_dword(size) { + dynasm!( + assembler + ; sub rsp, 4 + ; mov [rsp], 
Rd(param_reg as u8) + ); + } else { + dynasm!( + assembler + ; sub rsp, 8 + ; mov [rsp], Rq(param_reg as u8) + ); + } + Ok(()) } + fn feed_local(&mut self, ty: WpType, n: usize) -> Result<(), CodegenError> { + let assembler = self.assembler.as_mut().unwrap(); let size = get_size_of_type(&ty)?; for _ in 0..n { // FIXME: check range of n self.current_stack_offset += size; - self.callee_managed_stack_offset += size; self.locals.push(Local { ty: ty, stack_offset: self.current_stack_offset, }); - } - Ok(()) - } - fn begin_body(&mut self) -> Result<(), CodegenError> { - let assembler = self.assembler.as_mut().unwrap(); - dynasm!( - assembler - ; mov rax, rsp - ; sub rsp, self.callee_managed_stack_offset as i32 - ; xor rcx, rcx - ); - - for local in &self.locals[self.num_params..] { - let size = get_size_of_type(&local.ty)?; - dynasm!( - assembler - ; sub rax, size as i32 - ); - if size == 4 { - dynasm!( + match size { + 4 => dynasm!( assembler - ; mov [rax], ecx - ); - } else if size == 8 { - dynasm!( + ; sub rsp, 4 + ; mov DWORD [rsp], 0 + ), + 8 => dynasm!( assembler - ; mov [rax], rcx - ); - } else { - return Err(CodegenError { - message: "unsupported size for type", - }); + ; sub rsp, 8 + ; mov QWORD [rsp], 0 + ), + _ => unreachable!(), } } - dynasm!( - assembler - ; push rbp - ; mov rbp, rsp - ); + Ok(()) + } + fn begin_body(&mut self) -> Result<(), CodegenError> { Ok(()) } fn feed_opcode(&mut self, op: Operator) -> Result<(), CodegenError> { @@ -139,12 +203,71 @@ impl FunctionCodeGenerator for X64FunctionCode { }); } let local = self.locals[local_index]; - dynasm!( - assembler - ; mov rax, rbp - ; add rax, (self.current_stack_offset - local.stack_offset) as i32 - // TODO: How should we dynamically specify a register? - ); + let location = self.value_stack.push(local.ty); + let size = get_size_of_type(&local.ty)?; + + match location { + ValueLocation::Register(id) => { + if is_dword(size) { + dynasm!( + assembler + ; mov Rd(Register::from_scratch_reg(id) as u8), [rbp - (local.stack_offset as i32)] + ); + } else { + dynasm!( + assembler + ; mov Rq(Register::from_scratch_reg(id) as u8), [rbp - (local.stack_offset as i32)] + ); + } + } + ValueLocation::Stack => { + if is_dword(size) { + dynasm!( + assembler + ; mov eax, [rbp - (local.stack_offset as i32)] + ; sub rsp, 4 + ; mov [rsp], eax + ); + } else { + dynasm!( + assembler + ; mov rax, [rbp - (local.stack_offset as i32)] + ; sub rsp, 8 + ; mov [rsp], rax + ); + } + } + } + } + Operator::I32Add => { + let (a, b) = self.value_stack.pop2()?; + if a.ty != WpType::I32 || b.ty != WpType::I32 { + return Err(CodegenError { + message: "I32Add type mismatch", + }); + } + Self::gen_rt_pop(assembler, &b); + Self::gen_rt_pop(assembler, &a); + + self.value_stack.push(WpType::I32); + + if a.location.is_register() && b.location.is_register() { + let (a_reg, b_reg) = ( + Register::from_scratch_reg(a.location.get_register()?), + Register::from_scratch_reg(b.location.get_register()?), + ); + // output is in a_reg. 
+ dynasm!( + assembler + ; add Rd(a_reg as u8), Rd(b_reg as u8) + ); + } else { + unimplemented!(); + } + } + Operator::Drop => { + let info = self.value_stack.pop()?; + Self::gen_rt_pop(assembler, &info)?; } _ => unimplemented!(), } @@ -158,7 +281,6 @@ impl FunctionCodeGenerator for X64FunctionCode { ; => self.cleanup_label ; mov rsp, rbp ; pop rbp - ; add rsp, self.current_stack_offset as i32 ; ret ); Ok(()) @@ -174,3 +296,7 @@ fn get_size_of_type(ty: &WpType) -> Result { }), } } + +fn is_dword(n: usize) -> bool { + n == 4 +} From 7df7204e4b03ae0a9781d6da70a05f48ebcb6c68 Mon Sep 17 00:00:00 2001 From: losfair Date: Thu, 14 Feb 2019 00:53:06 +0800 Subject: [PATCH 009/100] codegen: More opcodes --- lib/dynasm-backend/src/codegen.rs | 2 ++ lib/dynasm-backend/src/codegen_x64.rs | 49 +++++++++++++++++++++++++-- 2 files changed, 49 insertions(+), 2 deletions(-) diff --git a/lib/dynasm-backend/src/codegen.rs b/lib/dynasm-backend/src/codegen.rs index 1f39e02f2c5..2695d24352a 100644 --- a/lib/dynasm-backend/src/codegen.rs +++ b/lib/dynasm-backend/src/codegen.rs @@ -2,9 +2,11 @@ use wasmparser::{Operator, Type as WpType}; pub trait ModuleCodeGenerator { fn next_function(&mut self) -> Result<&mut FCG, CodegenError>; + fn finalize(&mut self) -> Result<(), CodegenError>; } pub trait FunctionCodeGenerator { + fn feed_return(&mut self, ty: WpType) -> Result<(), CodegenError>; fn feed_param(&mut self, ty: WpType) -> Result<(), CodegenError>; fn feed_local(&mut self, ty: WpType, n: usize) -> Result<(), CodegenError>; fn begin_body(&mut self) -> Result<(), CodegenError>; diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index e7808b0c4ab..c6f3dc49559 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -51,6 +51,7 @@ pub struct X64FunctionCode { begin_label: DynamicLabel, cleanup_label: DynamicLabel, assembler: Option, + returns: Vec, locals: Vec, num_params: usize, current_stack_offset: usize, @@ -94,6 +95,7 @@ impl ModuleCodeGenerator for X64ModuleCodeGenerator { begin_label: begin_label, cleanup_label: assembler.new_dynamic_label(), assembler: Some(assembler), + returns: vec![], locals: vec![], num_params: 0, current_stack_offset: 0, @@ -102,6 +104,15 @@ impl ModuleCodeGenerator for X64ModuleCodeGenerator { self.functions.push(code); Ok(self.functions.last_mut().unwrap()) } + + fn finalize(&mut self) -> Result<(), CodegenError> { + let mut assembler = match self.functions.last_mut() { + Some(x) => x.assembler.take().unwrap(), + None => return Ok(()), + }; + let output = assembler.finalize().unwrap(); + Ok(()) + } } impl X64FunctionCode { @@ -121,6 +132,11 @@ impl X64FunctionCode { } impl FunctionCodeGenerator for X64FunctionCode { + fn feed_return(&mut self, ty: WpType) -> Result<(), CodegenError> { + self.returns.push(ty); + Ok(()) + } + fn feed_param(&mut self, ty: WpType) -> Result<(), CodegenError> { let assembler = self.assembler.as_mut().unwrap(); let size = get_size_of_type(&ty)?; @@ -246,8 +262,8 @@ impl FunctionCodeGenerator for X64FunctionCode { message: "I32Add type mismatch", }); } - Self::gen_rt_pop(assembler, &b); - Self::gen_rt_pop(assembler, &a); + Self::gen_rt_pop(assembler, &b)?; + Self::gen_rt_pop(assembler, &a)?; self.value_stack.push(WpType::I32); @@ -269,6 +285,35 @@ impl FunctionCodeGenerator for X64FunctionCode { let info = self.value_stack.pop()?; Self::gen_rt_pop(assembler, &info)?; } + Operator::Return => match self.returns.len() { + 0 => {} + 1 => { + let val = self.value_stack.pop()?; + 
let ty = self.returns[0]; + let reg = val.location.get_register()?; + if is_dword(get_size_of_type(&ty)?) { + dynasm!( + assembler + ; mov eax, Rd(Register::from_scratch_reg(reg) as u8) + ; jmp =>self.cleanup_label + ); + } else { + dynasm!( + assembler + ; mov rax, Rq(Register::from_scratch_reg(reg) as u8) + ; jmp =>self.cleanup_label + ); + } + } + _ => { + return Err(CodegenError { + message: "multiple return values is not yet supported", + }) + } + }, + Operator::End => { + // todo + } _ => unimplemented!(), } Ok(()) From 8d8db4aa092a4665d5454d75c15debed7539e0ef Mon Sep 17 00:00:00 2001 From: losfair Date: Thu, 14 Feb 2019 00:53:36 +0800 Subject: [PATCH 010/100] Starting integration. --- Cargo.lock | 1 + lib/dynasm-backend/src/lib.rs | 72 +++++++++++++++++++++++++++++++++ lib/dynasm-backend/src/parse.rs | 13 +++--- lib/runtime/Cargo.toml | 5 ++- lib/runtime/src/lib.rs | 4 +- 5 files changed, 86 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8f1bd4cf7ea..5318d9effe4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -896,6 +896,7 @@ version = "0.1.4" dependencies = [ "lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "wasmer-clif-backend 0.1.2", + "wasmer-dynasm-backend 0.1.0", "wasmer-runtime-core 0.1.2", ] diff --git a/lib/dynasm-backend/src/lib.rs b/lib/dynasm-backend/src/lib.rs index 95aa18e8f55..3b4ffdbe2e6 100644 --- a/lib/dynasm-backend/src/lib.rs +++ b/lib/dynasm-backend/src/lib.rs @@ -10,3 +10,75 @@ mod codegen; mod codegen_x64; mod parse; mod stack; + +use std::ptr::NonNull; +use wasmer_runtime_core::{ + backend::{Backend, Compiler, FuncResolver, ProtectedCaller, Token, UserTrapper}, + error::{CompileError, CompileResult, RuntimeResult}, + module::{ModuleInfo, ModuleInner, StringTable}, + structures::{Map, TypedIndex}, + types::{ + FuncIndex, FuncSig, GlobalIndex, LocalFuncIndex, MemoryIndex, SigIndex, TableIndex, Type, + Value, + }, + vm::{self, ImportBacking}, +}; + +struct Placeholder; + +impl FuncResolver for Placeholder { + fn get( + &self, + _module: &ModuleInner, + _local_func_index: LocalFuncIndex, + ) -> Option> { + None + } +} + +impl ProtectedCaller for Placeholder { + fn call( + &self, + _module: &ModuleInner, + _func_index: FuncIndex, + _params: &[Value], + _import_backing: &ImportBacking, + _vmctx: *mut vm::Ctx, + _: Token, + ) -> RuntimeResult> { + Ok(vec![]) + } + + fn get_early_trapper(&self) -> Box { + pub struct Trapper; + + impl UserTrapper for Trapper { + unsafe fn do_early_trap(&self, msg: String) -> ! 
{ + panic!("{}", msg); + } + } + + Box::new(Trapper) + } +} + +pub struct SinglePassCompiler {} + +impl Compiler for SinglePassCompiler { + fn compile(&self, wasm: &[u8], _: Token) -> CompileResult { + let mut mcg = codegen_x64::X64ModuleCodeGenerator::new(); + let info = match parse::read_module(wasm, Backend::Dynasm, &mut mcg) { + Ok(x) => x, + Err(e) => { + return Err(CompileError::InternalError { + msg: format!("{:?}", e), + }) + } + }; + Ok(ModuleInner { + func_resolver: Box::new(Placeholder), + protected_caller: Box::new(Placeholder), + info: info, + }) + } +} diff --git a/lib/dynasm-backend/src/parse.rs b/lib/dynasm-backend/src/parse.rs index bc5ef9a3278..73e2e0a8eb9 100644 --- a/lib/dynasm-backend/src/parse.rs +++ b/lib/dynasm-backend/src/parse.rs @@ -41,7 +41,7 @@ impl From for LoadError { pub fn read_module, FCG: FunctionCodeGenerator>( wasm: &[u8], backend: Backend, - mut mcg: MCG, + mcg: &mut MCG, ) -> Result { let mut info = ModuleInfo { memories: Map::new(), @@ -72,6 +72,7 @@ pub fn read_module, FCG: FunctionCodeGenerator>( loop { if reader.eof() { + mcg.finalize()?; return Ok(info); } @@ -269,12 +270,14 @@ pub fn read_module, FCG: FunctionCodeGenerator>( for i in 0..code_reader.get_count() { let item = code_reader.read()?; let mut fcg = mcg.next_function()?; - for param in info + let sig = info .signatures .get(*info.func_assoc.get(FuncIndex::new(i as usize)).unwrap()) - .unwrap() - .params() - { + .unwrap(); + for ret in sig.returns() { + fcg.feed_return(type_to_wp_type(*ret))?; + } + for param in sig.params() { fcg.feed_param(type_to_wp_type(*param))?; } for local in item.get_locals_reader()? { diff --git a/lib/runtime/Cargo.toml b/lib/runtime/Cargo.toml index 2a247819337..0debeeeed63 100644 --- a/lib/runtime/Cargo.toml +++ b/lib/runtime/Cargo.toml @@ -11,10 +11,11 @@ readme = "README.md" [dependencies] wasmer-runtime-core = { path = "../runtime-core", version = "0.1.2" } wasmer-clif-backend = { path = "../clif-backend", version = "0.1.2", optional = true } +wasmer-dynasm-backend = { path = "../dynasm-backend", optional = true } lazy_static = "1.2.0" [features] -default = ["default-compiler", "cache"] -default-compiler = ["wasmer-clif-backend/cache", "wasmer-runtime-core/cache"] +default = ["default-compiler"] +default-compiler = ["wasmer-clif-backend", "wasmer-dynasm-backend"] cache = ["default-compiler"] debug = ["wasmer-clif-backend/debug", "wasmer-runtime-core/debug"] diff --git a/lib/runtime/src/lib.rs b/lib/runtime/src/lib.rs index f100a5bad02..5e991a5ead6 100644 --- a/lib/runtime/src/lib.rs +++ b/lib/runtime/src/lib.rs @@ -188,10 +188,10 @@ pub fn compile_cache(wasm: &[u8]) -> error::CompileResult { #[cfg(feature = "default-compiler")] fn default_compiler() -> &'static dyn Compiler { use lazy_static::lazy_static; - use wasmer_clif_backend::CraneliftCompiler; + use wasmer_dynasm_backend::SinglePassCompiler; lazy_static! { - static ref DEFAULT_COMPILER: CraneliftCompiler = { CraneliftCompiler::new() }; + static ref DEFAULT_COMPILER: SinglePassCompiler = { SinglePassCompiler {} }; } &*DEFAULT_COMPILER as &dyn Compiler From 1526d358725a18f5b95d4cbf9b0cface8ebcfaff Mon Sep 17 00:00:00 2001 From: losfair Date: Fri, 15 Feb 2019 02:21:04 +0800 Subject: [PATCH 011/100] Emit a Return opcode if the last one is not return. 
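The operator stream from wasmparser ends a function body with `End`, not `Return`, so the parser tracks whether the last opcode it fed was a `Return` and synthesizes one otherwise. A minimal sketch of that idea, using a stand-in `Op` enum instead of the borrowed `wasmparser::Operator`:

    #[derive(Debug, PartialEq)]
    enum Op { I32Const(i32), Return, End } // stand-in for wasmparser::Operator

    // Feed every opcode, then a synthetic Return if the body never ended with one.
    fn feed_with_implicit_return(body: &[Op], mut feed: impl FnMut(&Op)) {
        let mut last_is_return = false;
        for op in body {
            last_is_return = match op {
                Op::Return => true,
                _ => false,
            };
            feed(op);
        }
        if !last_is_return {
            feed(&Op::Return);
        }
    }

    fn main() {
        let mut seen = Vec::new();
        feed_with_implicit_return(&[Op::I32Const(1), Op::End], |op| seen.push(format!("{:?}", op)));
        assert_eq!(seen.last().map(String::as_str), Some("Return"));
    }

This is a stopgap; a later commit removes the hack once proper `End` handling is in place in the code generator.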
--- lib/dynasm-backend/src/parse.rs | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/lib/dynasm-backend/src/parse.rs b/lib/dynasm-backend/src/parse.rs index 73e2e0a8eb9..51b17417f9b 100644 --- a/lib/dynasm-backend/src/parse.rs +++ b/lib/dynasm-backend/src/parse.rs @@ -1,7 +1,7 @@ use crate::codegen::{CodegenError, FunctionCodeGenerator, ModuleCodeGenerator}; use std::sync::Arc; use wasmer_runtime_core::{ - backend::Backend, + backend::{Backend, ProtectedCaller}, module::{ DataInitializer, ExportIndex, ImportName, ModuleInfo, StringTable, StringTableBuilder, TableInitializer, @@ -38,7 +38,11 @@ impl From for LoadError { } } -pub fn read_module, FCG: FunctionCodeGenerator>( +pub fn read_module< + MCG: ModuleCodeGenerator, + FCG: FunctionCodeGenerator, + PC: ProtectedCaller, +>( wasm: &[u8], backend: Backend, mcg: &mut MCG, @@ -72,7 +76,6 @@ pub fn read_module, FCG: FunctionCodeGenerator>( loop { if reader.eof() { - mcg.finalize()?; return Ok(info); } @@ -285,10 +288,18 @@ pub fn read_module, FCG: FunctionCodeGenerator>( fcg.feed_local(ty, count as usize)?; } fcg.begin_body()?; + let mut last_is_return = false; for op in item.get_operators_reader()? { let op = op?; + last_is_return = match op { + Operator::Return => true, + _ => false, + }; fcg.feed_opcode(op)?; } + if !last_is_return { + fcg.feed_opcode(Operator::Return)?; + } fcg.finalize()?; } } From bb52a4e6ebd703d50038e8b85d84010280574ec6 Mon Sep 17 00:00:00 2001 From: losfair Date: Fri, 15 Feb 2019 02:21:52 +0800 Subject: [PATCH 012/100] Now we can run `add`! --- lib/dynasm-backend/src/codegen.rs | 5 +- lib/dynasm-backend/src/codegen_x64.rs | 91 +++++++++++++++++++++++++-- lib/dynasm-backend/src/lib.rs | 57 +++++++---------- 3 files changed, 111 insertions(+), 42 deletions(-) diff --git a/lib/dynasm-backend/src/codegen.rs b/lib/dynasm-backend/src/codegen.rs index 2695d24352a..18e67f5e17f 100644 --- a/lib/dynasm-backend/src/codegen.rs +++ b/lib/dynasm-backend/src/codegen.rs @@ -1,8 +1,9 @@ +use wasmer_runtime_core::backend::ProtectedCaller; use wasmparser::{Operator, Type as WpType}; -pub trait ModuleCodeGenerator { +pub trait ModuleCodeGenerator { fn next_function(&mut self) -> Result<&mut FCG, CodegenError>; - fn finalize(&mut self) -> Result<(), CodegenError>; + fn finalize(self) -> Result; } pub trait FunctionCodeGenerator { diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index c6f3dc49559..3d0582530af 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -1,6 +1,19 @@ use super::codegen::*; use super::stack::{ValueInfo, ValueLocation, ValueStack}; -use dynasmrt::{x64::Assembler, DynamicLabel, DynasmApi, DynasmLabelApi}; +use dynasmrt::{ + x64::Assembler, AssemblyOffset, DynamicLabel, DynasmApi, DynasmLabelApi, ExecutableBuffer, +}; +use wasmer_runtime_core::{ + backend::{Backend, Compiler, FuncResolver, ProtectedCaller, Token, UserTrapper}, + error::{CompileError, CompileResult, RuntimeError, RuntimeResult}, + module::{ModuleInfo, ModuleInner, StringTable}, + structures::{Map, TypedIndex}, + types::{ + FuncIndex, FuncSig, GlobalIndex, LocalFuncIndex, MemoryIndex, SigIndex, TableIndex, Type, + Value, + }, + vm::{self, ImportBacking}, +}; use wasmparser::{Operator, Type as WpType}; #[repr(u8)] @@ -49,6 +62,7 @@ pub struct X64ModuleCodeGenerator { pub struct X64FunctionCode { id: usize, begin_label: DynamicLabel, + begin_offset: AssemblyOffset, cleanup_label: DynamicLabel, assembler: Option, returns: Vec, @@ -58,6 
+72,55 @@ pub struct X64FunctionCode { value_stack: ValueStack, } +pub struct X64ExecutionContext { + code: ExecutableBuffer, + functions: Vec, +} + +impl ProtectedCaller for X64ExecutionContext { + fn call( + &self, + _module: &ModuleInner, + _func_index: FuncIndex, + _params: &[Value], + _import_backing: &ImportBacking, + _vmctx: *mut vm::Ctx, + _: Token, + ) -> RuntimeResult> { + let index = _func_index.index(); + let ptr = self.code.ptr(self.functions[index].begin_offset); + let return_ty = self.functions[index].returns.last().cloned(); + + if self.functions[index].num_params != _params.len() { + return Err(RuntimeError::User { + msg: "param count mismatch".into(), + }); + } + + match self.functions[index].num_params { + 2 => unsafe { + let ptr: extern "C" fn(i64, i64) -> i64 = ::std::mem::transmute(ptr); + Ok(vec![Value::I32( + ptr(value_to_i64(&_params[0]), value_to_i64(&_params[1])) as i32, + )]) + }, + _ => unimplemented!(), + } + } + + fn get_early_trapper(&self) -> Box { + pub struct Trapper; + + impl UserTrapper for Trapper { + unsafe fn do_early_trap(&self, msg: String) -> ! { + panic!("{}", msg); + } + } + + Box::new(Trapper) + } +} + #[derive(Copy, Clone, Debug)] struct Local { ty: WpType, @@ -70,7 +133,7 @@ impl X64ModuleCodeGenerator { } } -impl ModuleCodeGenerator for X64ModuleCodeGenerator { +impl ModuleCodeGenerator for X64ModuleCodeGenerator { fn next_function(&mut self) -> Result<&mut X64FunctionCode, CodegenError> { let mut assembler = match self.functions.last_mut() { Some(x) => x.assembler.take().unwrap(), @@ -84,6 +147,7 @@ impl ModuleCodeGenerator for X64ModuleCodeGenerator { }, }; let begin_label = assembler.new_dynamic_label(); + let begin_offset = assembler.offset(); dynasm!( assembler ; => begin_label @@ -93,6 +157,7 @@ impl ModuleCodeGenerator for X64ModuleCodeGenerator { let code = X64FunctionCode { id: self.functions.len(), begin_label: begin_label, + begin_offset: begin_offset, cleanup_label: assembler.new_dynamic_label(), assembler: Some(assembler), returns: vec![], @@ -105,13 +170,20 @@ impl ModuleCodeGenerator for X64ModuleCodeGenerator { Ok(self.functions.last_mut().unwrap()) } - fn finalize(&mut self) -> Result<(), CodegenError> { + fn finalize(mut self) -> Result { let mut assembler = match self.functions.last_mut() { Some(x) => x.assembler.take().unwrap(), - None => return Ok(()), + None => { + return Err(CodegenError { + message: "no function", + }) + } }; let output = assembler.finalize().unwrap(); - Ok(()) + Ok(X64ExecutionContext { + code: output, + functions: self.functions, + }) } } @@ -345,3 +417,12 @@ fn get_size_of_type(ty: &WpType) -> Result { fn is_dword(n: usize) -> bool { n == 4 } + +fn value_to_i64(v: &Value) -> i64 { + match *v { + Value::F32(x) => x.to_bits() as u64 as i64, + Value::F64(x) => x.to_bits() as u64 as i64, + Value::I32(x) => x as u64 as i64, + Value::I64(x) => x as u64 as i64, + } +} diff --git a/lib/dynasm-backend/src/lib.rs b/lib/dynasm-backend/src/lib.rs index 3b4ffdbe2e6..f7f751c371f 100644 --- a/lib/dynasm-backend/src/lib.rs +++ b/lib/dynasm-backend/src/lib.rs @@ -11,6 +11,8 @@ mod codegen_x64; mod parse; mod stack; +use crate::codegen::{CodegenError, ModuleCodeGenerator}; +use crate::parse::LoadError; use std::ptr::NonNull; use wasmer_runtime_core::{ backend::{Backend, Compiler, FuncResolver, ProtectedCaller, Token, UserTrapper}, @@ -32,53 +34,38 @@ impl FuncResolver for Placeholder { _module: &ModuleInner, _local_func_index: LocalFuncIndex, ) -> Option> { + panic!(); None } } -impl ProtectedCaller for Placeholder 
{ - fn call( - &self, - _module: &ModuleInner, - _func_index: FuncIndex, - _params: &[Value], - _import_backing: &ImportBacking, - _vmctx: *mut vm::Ctx, - _: Token, - ) -> RuntimeResult> { - Ok(vec![]) - } - - fn get_early_trapper(&self) -> Box { - pub struct Trapper; - - impl UserTrapper for Trapper { - unsafe fn do_early_trap(&self, msg: String) -> ! { - panic!("{}", msg); - } - } - - Box::new(Trapper) - } -} - pub struct SinglePassCompiler {} impl Compiler for SinglePassCompiler { fn compile(&self, wasm: &[u8], _: Token) -> CompileResult { let mut mcg = codegen_x64::X64ModuleCodeGenerator::new(); - let info = match parse::read_module(wasm, Backend::Dynasm, &mut mcg) { - Ok(x) => x, - Err(e) => { - return Err(CompileError::InternalError { - msg: format!("{:?}", e), - }) - } - }; + let info = parse::read_module(wasm, Backend::Dynasm, &mut mcg)?; + let ec = mcg.finalize()?; Ok(ModuleInner { func_resolver: Box::new(Placeholder), - protected_caller: Box::new(Placeholder), + protected_caller: Box::new(ec), info: info, }) } } + +impl From for CompileError { + fn from(other: CodegenError) -> CompileError { + CompileError::InternalError { + msg: other.message.into(), + } + } +} + +impl From for CompileError { + fn from(other: LoadError) -> CompileError { + CompileError::InternalError { + msg: format!("{:?}", other), + } + } +} From 5583e96d9657712e6bf18b4442531e680a484f1e Mon Sep 17 00:00:00 2001 From: losfair Date: Tue, 19 Feb 2019 19:19:40 +0800 Subject: [PATCH 013/100] Mitigate a bug that causes incorrect code generation. Still need to figure out why R8 and higher registers don't work. --- lib/dynasm-backend/src/codegen_x64.rs | 27 ++++++++++++++++++++++----- 1 file changed, 22 insertions(+), 5 deletions(-) diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index 3d0582530af..71629a9f054 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -153,6 +153,7 @@ impl ModuleCodeGenerator for X64ModuleCode ; => begin_label ; push rbp ; mov rbp, rsp + //; int 3 ); let code = X64FunctionCode { id: self.functions.len(), @@ -164,7 +165,7 @@ impl ModuleCodeGenerator for X64ModuleCode locals: vec![], num_params: 0, current_stack_offset: 0, - value_stack: ValueStack::new(8), + value_stack: ValueStack::new(4), }; self.functions.push(code); Ok(self.functions.last_mut().unwrap()) @@ -334,9 +335,6 @@ impl FunctionCodeGenerator for X64FunctionCode { message: "I32Add type mismatch", }); } - Self::gen_rt_pop(assembler, &b)?; - Self::gen_rt_pop(assembler, &a)?; - self.value_stack.push(WpType::I32); if a.location.is_register() && b.location.is_register() { @@ -349,8 +347,27 @@ impl FunctionCodeGenerator for X64FunctionCode { assembler ; add Rd(a_reg as u8), Rd(b_reg as u8) ); + } else if a.location.is_register() { + let a_reg = Register::from_scratch_reg(a.location.get_register()?); + dynasm!( + assembler + ; mov eax, [rsp] + ; add rsp, 4 + ; add Rd(a_reg as u8), eax + ); + } else if b.location.is_register() { + unreachable!(); } else { - unimplemented!(); + dynasm!( + assembler + ; push rcx + ; mov eax, [rsp + 12] + ; mov ecx, [rsp + 8] + ; add eax, ecx + ; mov [rsp + 12], eax + ; pop rcx + ; add rsp, 4 + ); } } Operator::Drop => { From 93d2713bde199c2d4c889471a1480a0d2e6d41b1 Mon Sep 17 00:00:00 2001 From: losfair Date: Tue, 19 Feb 2019 20:25:09 +0800 Subject: [PATCH 014/100] Operators. 
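On x86-64, `div` and `idiv` divide the 64-bit value in EDX:EAX by the operand and leave the quotient in EAX and the remainder in EDX. That register contract is why the division helper in this patch zeroes EDX, spills RDX when it still holds a live value, and selects either RAX or RDX as the output register. A small pure-Rust model of the contract, with illustrative names only:

    // EDX:EAX is the dividend; div leaves the quotient in EAX, the remainder in EDX.
    // (Unsigned `div` shown; `idiv` is the signed analogue. A real `div` faults on
    // divide-by-zero or on a quotient that does not fit in 32 bits.)
    fn div_model(eax: u32, edx: u32, divisor: u32) -> (u32, u32) {
        let dividend = ((edx as u64) << 32) | eax as u64;
        let quotient = (dividend / divisor as u64) as u32; // new EAX
        let remainder = (dividend % divisor as u64) as u32; // new EDX
        (quotient, remainder)
    }

    fn main() {
        // The backend zeroes EDX first, so this reduces to ordinary 32-bit division.
        assert_eq!(div_model(7, 0, 2), (3, 1));
    }

Picking RAX as the output implements `i32.div_*`; picking RDX implements `i32.rem_*`.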
--- lib/dynasm-backend/src/codegen_x64.rs | 284 ++++++++++++++++++++++---- 1 file changed, 244 insertions(+), 40 deletions(-) diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index 71629a9f054..dda34a74fdd 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -52,6 +52,22 @@ impl Register { _ => unreachable!(), } } + + pub fn is_used(&self, stack: &ValueStack) -> bool { + use self::Register::*; + for val in &stack.values { + match val.location { + ValueLocation::Register(x) => { + if Register::from_scratch_reg(x) == *self { + return true; + } + } + ValueLocation::Stack => break, + } + } + + false + } } #[derive(Default)] @@ -202,6 +218,136 @@ impl X64FunctionCode { } Ok(()) } + + /// Emits a binary operator. + /// + /// Guarantees that the first Register parameter to callback `f` will never be `Register::RAX`. + fn emit_binop_i32( + assembler: &mut Assembler, + value_stack: &mut ValueStack, + f: F, + ) -> Result<(), CodegenError> { + let (a, b) = value_stack.pop2()?; + if a.ty != WpType::I32 || b.ty != WpType::I32 { + return Err(CodegenError { + message: "I32Add type mismatch", + }); + } + value_stack.push(WpType::I32); + + if a.location.is_register() && b.location.is_register() { + // output is in a_reg. + f( + assembler, + value_stack, + Register::from_scratch_reg(a.location.get_register()?), + Register::from_scratch_reg(b.location.get_register()?), + ); + } else if a.location.is_register() { + dynasm!( + assembler + ; mov eax, [rsp] + ; add rsp, 4 + ); + f( + assembler, + value_stack, + Register::from_scratch_reg(a.location.get_register()?), + Register::RAX, + ); + } else if b.location.is_register() { + unreachable!(); + } else { + dynasm!( + assembler + ; push rcx + ; mov ecx, [rsp + 12] + ; mov eax, [rsp + 8] + ); + f(assembler, value_stack, Register::RCX, Register::RAX); + dynasm!( + assembler + ; mov [rsp + 12], ecx + ; pop rcx + ; add rsp, 4 + ); + } + + Ok(()) + } + + fn emit_div_i32( + assembler: &mut Assembler, + value_stack: &ValueStack, + left: Register, + right: Register, + signed: bool, + out: Register, + ) { + let dx_used = Register::RDX.is_used(value_stack); + if dx_used { + dynasm!( + assembler + ; push rdx + ); + } + + if right == Register::RAX { + dynasm!( + assembler + ; push rax + ; mov eax, Rd(left as u8) + ; mov edx, 0 + ; mov Rd(left as u8), [rsp] + ); + + if signed { + dynasm!( + assembler + ; idiv Rd(left as u8) + ); + } else { + dynasm!( + assembler + ; div Rd(left as u8) + ); + } + + dynasm!( + assembler + ; mov Rd(left as u8), Rd(out as u8) + ; pop rax + ); + } else { + dynasm!( + assembler + ; mov eax, Rd(left as u8) + ; mov edx, 0 + ); + if signed { + dynasm!( + assembler + ; idiv Rd(right as u8) + ); + } else { + dynasm!( + assembler + ; div Rd(right as u8) + ); + } + dynasm!( + assembler + ; mov Rd(left as u8), Rd(out as u8) + ); + } + + if dx_used { + dynasm!( + assembler + ; pop rdx + ); + } + } } impl FunctionCodeGenerator for X64FunctionCode { @@ -329,46 +475,104 @@ impl FunctionCodeGenerator for X64FunctionCode { } } Operator::I32Add => { - let (a, b) = self.value_stack.pop2()?; - if a.ty != WpType::I32 || b.ty != WpType::I32 { - return Err(CodegenError { - message: "I32Add type mismatch", - }); - } - self.value_stack.push(WpType::I32); - - if a.location.is_register() && b.location.is_register() { - let (a_reg, b_reg) = ( - Register::from_scratch_reg(a.location.get_register()?), - Register::from_scratch_reg(b.location.get_register()?), - ); - // output is in a_reg. 
- dynasm!( - assembler - ; add Rd(a_reg as u8), Rd(b_reg as u8) - ); - } else if a.location.is_register() { - let a_reg = Register::from_scratch_reg(a.location.get_register()?); - dynasm!( - assembler - ; mov eax, [rsp] - ; add rsp, 4 - ; add Rd(a_reg as u8), eax - ); - } else if b.location.is_register() { - unreachable!(); - } else { - dynasm!( - assembler - ; push rcx - ; mov eax, [rsp + 12] - ; mov ecx, [rsp + 8] - ; add eax, ecx - ; mov [rsp + 12], eax - ; pop rcx - ; add rsp, 4 - ); - } + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + dynasm!( + assembler + ; add Rd(left as u8), Rd(right as u8) + ) + }, + )?; + } + Operator::I32Sub => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + dynasm!( + assembler + ; sub Rd(left as u8), Rd(right as u8) + ) + }, + )?; + } + Operator::I32Mul => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + dynasm!( + assembler + ; imul Rd(left as u8), Rd(right as u8) + ) + }, + )?; + } + Operator::I32DivU => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_div_i32( + assembler, + value_stack, + left, + right, + false, + Register::RAX, + ); + }, + )?; + } + Operator::I32DivS => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_div_i32( + assembler, + value_stack, + left, + right, + true, + Register::RAX, + ); + }, + )?; + } + Operator::I32RemU => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_div_i32( + assembler, + value_stack, + left, + right, + false, + Register::RDX, + ); + }, + )?; + } + Operator::I32RemS => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_div_i32( + assembler, + value_stack, + left, + right, + true, + Register::RDX, + ); + }, + )?; } Operator::Drop => { let info = self.value_stack.pop()?; From 61c83507a4160c168f279c7ab026ea0674588dce Mon Sep 17 00:00:00 2001 From: losfair Date: Wed, 20 Feb 2019 22:56:32 +0800 Subject: [PATCH 015/100] Control frames, jumps & stack unwinding. 
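A branch that leaves one or more blocks has to unwind whatever those blocks spilled onto the machine stack before jumping to the target label; the block result itself travels in RAX via the leave-frame helper, so only spilled values need an `rsp` adjustment. A sketch of the `sp_diff` computation, mirroring the loop in `emit_jmp` with stand-in types and 4/8-byte value sizes:

    #[derive(Copy, Clone, PartialEq)]
    enum Loc { Register, Stack }

    // Bytes to add to rsp before the jump: walk the value stack from the top,
    // summing the sizes of spilled values pushed since the target frame began,
    // and stop at the first register-held value.
    fn sp_diff(values: &[(Loc, usize)], depth_before: usize) -> usize {
        let mut diff = 0;
        for &(loc, size) in values[depth_before..].iter().rev() {
            if loc != Loc::Stack {
                break; // register-held values need no stack adjustment
            }
            diff += size;
        }
        diff
    }

    fn main() {
        let values = [(Loc::Register, 4), (Loc::Stack, 4), (Loc::Stack, 8)];
        // Jumping out past the last two (spilled) values frees 12 bytes.
        assert_eq!(sp_diff(&values, 1), 12);
    }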
--- lib/dynasm-backend/src/codegen_x64.rs | 198 +++++++++++++++++++++++++- lib/dynasm-backend/src/stack.rs | 17 ++- 2 files changed, 211 insertions(+), 4 deletions(-) diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index dda34a74fdd..40ae70c4522 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -1,5 +1,5 @@ use super::codegen::*; -use super::stack::{ValueInfo, ValueLocation, ValueStack}; +use super::stack::{ControlFrame, ControlStack, ValueInfo, ValueLocation, ValueStack}; use dynasmrt::{ x64::Assembler, AssemblyOffset, DynamicLabel, DynasmApi, DynasmLabelApi, ExecutableBuffer, }; @@ -86,6 +86,7 @@ pub struct X64FunctionCode { num_params: usize, current_stack_offset: usize, value_stack: ValueStack, + control_stack: Option, } pub struct X64ExecutionContext { @@ -182,6 +183,7 @@ impl ModuleCodeGenerator for X64ModuleCode num_params: 0, current_stack_offset: 0, value_stack: ValueStack::new(4), + control_stack: None, }; self.functions.push(code); Ok(self.functions.last_mut().unwrap()) @@ -348,6 +350,159 @@ impl X64FunctionCode { ); } } + + fn emit_leave_frame( + assembler: &mut Assembler, + frame: &ControlFrame, + value_stack: &mut ValueStack, + ) -> Result<(), CodegenError> { + let ret_ty = match frame.returns.len() { + 1 => Some(frame.returns[0]), + 0 => None, + _ => { + return Err(CodegenError { + message: "more than one block returns are not yet supported", + }) + } + }; + + if ret_ty.is_some() && frame.loop_like { + return Err(CodegenError { + message: "return value is not supported for loops", + }); + } + + if value_stack.values.len() < frame.value_stack_depth_before + frame.returns.len() { + return Err(CodegenError { + message: "value stack underflow", + }); + } + + if let Some(ty) = ret_ty { + let ret = value_stack.pop()?; + match ret.location { + ValueLocation::Register(x) => { + dynasm!( + assembler + ; mov rax, Rq(x) + ); + } + ValueLocation::Stack => { + if is_dword(get_size_of_type(&ty)?) { + dynasm!( + assembler + ; mov eax, [rsp] + ; add rsp, 4 + ); + } else { + dynasm!( + assembler + ; mov rax, [rsp] + ; add rsp, 8 + ); + } + } + } + } + + Ok(()) + } + + fn emit_block_end( + assembler: &mut Assembler, + control_stack: &mut ControlStack, + value_stack: &mut ValueStack, + ) -> Result<(), CodegenError> { + let frame = match control_stack.frames.pop() { + Some(x) => x, + None => { + return Err(CodegenError { + message: "no frame", + }) + } + }; + + Self::emit_leave_frame(assembler, &frame, value_stack)?; + + if value_stack.values.len() != frame.value_stack_depth_before { + return Err(CodegenError { + message: "value_stack.values.len() != frame.value_stack_depth_before", + }); + } + + if !frame.loop_like { + dynasm!( + assembler + ; => frame.label + ); + } + + if frame.returns.len() == 1 { + let loc = value_stack.push(frame.returns[0]); + match loc { + ValueLocation::Register(x) => { + let reg = Register::from_scratch_reg(x); + dynasm!( + assembler + ; mov Rq(x as u8), rax + ); + } + ValueLocation::Stack => { + if is_dword(get_size_of_type(&frame.returns[0])?) 
{ + dynasm!( + assembler + ; sub rsp, 4 + ; mov [rsp], eax + ); + } else { + dynasm!( + assembler + ; sub rsp, 8 + ; mov [rsp], rax + ); + } + } + } + } + + Ok(()) + } + + fn emit_jmp( + assembler: &mut Assembler, + control_stack: &ControlStack, + value_stack: &mut ValueStack, + relative_frame_offset: usize, + ) -> Result<(), CodegenError> { + let frame = if relative_frame_offset >= control_stack.frames.len() { + return Err(CodegenError { + message: "jmp offset out of bounds", + }); + } else { + &control_stack.frames[control_stack.frames.len() - 1 - relative_frame_offset] + }; + + Self::emit_leave_frame(assembler, frame, value_stack)?; + + let mut sp_diff: usize = 0; + + for i in 0..value_stack.values.len() - frame.value_stack_depth_before { + let vi = value_stack.values[value_stack.values.len() - 1 - i]; + if vi.location == ValueLocation::Stack { + sp_diff += get_size_of_type(&vi.ty)?; + } else { + break; + } + } + + dynasm!( + assembler + ; add rsp, sp_diff as i32 + ; jmp =>frame.label + ); + + Ok(()) + } } impl FunctionCodeGenerator for X64FunctionCode { @@ -425,6 +580,10 @@ impl FunctionCodeGenerator for X64FunctionCode { Ok(()) } fn begin_body(&mut self) -> Result<(), CodegenError> { + self.control_stack = Some(ControlStack::new( + self.assembler.as_mut().unwrap().new_dynamic_label(), + self.returns.clone(), + )); Ok(()) } fn feed_opcode(&mut self, op: Operator) -> Result<(), CodegenError> { @@ -574,6 +733,7 @@ impl FunctionCodeGenerator for X64FunctionCode { }, )?; } + Operator::Block { ty } => {} Operator::Drop => { let info = self.value_stack.pop()?; Self::gen_rt_pop(assembler, &info)?; @@ -605,22 +765,56 @@ impl FunctionCodeGenerator for X64FunctionCode { } }, Operator::End => { - // todo + Self::emit_block_end( + assembler, + self.control_stack.as_mut().unwrap(), + &mut self.value_stack, + )?; } _ => unimplemented!(), } Ok(()) } + fn finalize(&mut self) -> Result<(), CodegenError> { let assembler = self.assembler.as_mut().unwrap(); + dynasm!( assembler ; ud2 ; => self.cleanup_label + ); + + if self.returns.len() == 1 { + if self.value_stack.values.len() != 1 { + return Err(CodegenError { + message: "returns.len() != value_stack.values.len()", + }); + } + let value_info = self.value_stack.pop()?; + if value_info.ty != self.returns[0] { + return Err(CodegenError { + message: "return type mismatch", + }); + } + if let ValueLocation::Register(x) = value_info.location { + let reg = Register::from_scratch_reg(x); + dynasm!( + assembler + ; mov rax, Rq(reg as u8) + ); + } else { + unreachable!(); + } + } + + dynasm!( + assembler ; mov rsp, rbp ; pop rbp ; ret ); + Ok(()) } } diff --git a/lib/dynasm-backend/src/stack.rs b/lib/dynasm-backend/src/stack.rs index 2ace6bd5105..8426b708dba 100644 --- a/lib/dynasm-backend/src/stack.rs +++ b/lib/dynasm-backend/src/stack.rs @@ -46,7 +46,7 @@ pub struct ValueInfo { pub location: ValueLocation, } -#[derive(Copy, Clone, Debug)] +#[derive(Copy, Clone, Debug, Eq, PartialEq)] pub enum ValueLocation { Register(u8), Stack, @@ -66,7 +66,7 @@ impl ValueLocation { Ok(id) } else { Err(CodegenError { - message: "not a register location" + message: "not a register location", }) } } @@ -140,3 +140,16 @@ impl ValueStack { self.values.truncate(target_depth); } } + +impl ControlStack { + pub fn new(label: DynamicLabel, returns: Vec) -> ControlStack { + ControlStack { + frames: vec![ControlFrame { + label: label, + loop_like: false, + returns: returns, + value_stack_depth_before: 0, + }], + } + } +} From aaabbf169c20fd96556c679e3a68716a96bfe07b Mon Sep 17 00:00:00 
2001 From: losfair Date: Wed, 20 Feb 2019 23:21:33 +0800 Subject: [PATCH 016/100] Operator::Br and control stack fixes --- lib/dynasm-backend/src/codegen_x64.rs | 135 +++++++++++++------------- 1 file changed, 65 insertions(+), 70 deletions(-) diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index 40ae70c4522..ebb675d45a2 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -79,7 +79,6 @@ pub struct X64FunctionCode { id: usize, begin_label: DynamicLabel, begin_offset: AssemblyOffset, - cleanup_label: DynamicLabel, assembler: Option, returns: Vec, locals: Vec, @@ -170,13 +169,12 @@ impl ModuleCodeGenerator for X64ModuleCode ; => begin_label ; push rbp ; mov rbp, rsp - //; int 3 + ; int 3 ); let code = X64FunctionCode { id: self.functions.len(), begin_label: begin_label, begin_offset: begin_offset, - cleanup_label: assembler.new_dynamic_label(), assembler: Some(assembler), returns: vec![], locals: vec![], @@ -351,6 +349,39 @@ impl X64FunctionCode { } } + fn emit_pop_into_ax( + assembler: &mut Assembler, + value_stack: &mut ValueStack, + ) -> Result<(), CodegenError> { + let val = value_stack.pop()?; + match val.location { + ValueLocation::Register(x) => { + let reg = Register::from_scratch_reg(x); + dynasm!( + assembler + ; mov rax, Rq(reg as u8) + ); + } + ValueLocation::Stack => { + if is_dword(get_size_of_type(&val.ty)?) { + dynasm!( + assembler + ; mov eax, [rsp] + ; add rsp, 4 + ); + } else { + dynasm!( + assembler + ; mov rax, [rsp] + ; add rsp, 8 + ); + } + } + } + + Ok(()) + } + fn emit_leave_frame( assembler: &mut Assembler, frame: &ControlFrame, @@ -379,30 +410,12 @@ impl X64FunctionCode { } if let Some(ty) = ret_ty { - let ret = value_stack.pop()?; - match ret.location { - ValueLocation::Register(x) => { - dynasm!( - assembler - ; mov rax, Rq(x) - ); - } - ValueLocation::Stack => { - if is_dword(get_size_of_type(&ty)?) { - dynasm!( - assembler - ; mov eax, [rsp] - ; add rsp, 4 - ); - } else { - dynasm!( - assembler - ; mov rax, [rsp] - ; add rsp, 8 - ); - } - } + if value_stack.values.iter().last().map(|x| x.ty) != ret_ty { + return Err(CodegenError { + message: "value type != return type", + }); } + Self::emit_pop_into_ax(assembler, value_stack)?; } Ok(()) @@ -741,22 +754,19 @@ impl FunctionCodeGenerator for X64FunctionCode { Operator::Return => match self.returns.len() { 0 => {} 1 => { - let val = self.value_stack.pop()?; - let ty = self.returns[0]; - let reg = val.location.get_register()?; - if is_dword(get_size_of_type(&ty)?) 
{ - dynasm!( - assembler - ; mov eax, Rd(Register::from_scratch_reg(reg) as u8) - ; jmp =>self.cleanup_label - ); - } else { - dynasm!( - assembler - ; mov rax, Rq(Register::from_scratch_reg(reg) as u8) - ; jmp =>self.cleanup_label - ); + if self.value_stack.values.iter().last().map(|x| x.ty) != Some(self.returns[0]) + { + return Err(CodegenError { + message: "self.value_stack.last().cloned() != Some(self.returns[0])", + }); } + Self::emit_pop_into_ax(assembler, &mut self.value_stack)?; + dynasm!( + assembler + ; mov rsp, rbp + ; pop rbp + ; ret + ); } _ => { return Err(CodegenError { @@ -771,6 +781,14 @@ impl FunctionCodeGenerator for X64FunctionCode { &mut self.value_stack, )?; } + Operator::Br { relative_depth } => { + Self::emit_jmp( + assembler, + self.control_stack.as_ref().unwrap(), + &mut self.value_stack, + relative_depth as usize, + )?; + } _ => unimplemented!(), } Ok(()) @@ -782,39 +800,16 @@ impl FunctionCodeGenerator for X64FunctionCode { dynasm!( assembler ; ud2 - ; => self.cleanup_label ); - if self.returns.len() == 1 { - if self.value_stack.values.len() != 1 { - return Err(CodegenError { - message: "returns.len() != value_stack.values.len()", - }); - } - let value_info = self.value_stack.pop()?; - if value_info.ty != self.returns[0] { - return Err(CodegenError { - message: "return type mismatch", - }); - } - if let ValueLocation::Register(x) = value_info.location { - let reg = Register::from_scratch_reg(x); - dynasm!( - assembler - ; mov rax, Rq(reg as u8) - ); - } else { - unreachable!(); - } + if self.value_stack.values.len() != 0 + || self.control_stack.as_ref().unwrap().frames.len() != 0 + { + return Err(CodegenError { + message: "control/value stack not empty at end of function", + }); } - dynasm!( - assembler - ; mov rsp, rbp - ; pop rbp - ; ret - ); - Ok(()) } } From 6f97ebd5f7b83d92af2cbeb2a42b32448877e297 Mon Sep 17 00:00:00 2001 From: losfair Date: Thu, 21 Feb 2019 21:12:58 +0800 Subject: [PATCH 017/100] Remove a hack in parser. --- lib/dynasm-backend/src/parse.rs | 8 -------- 1 file changed, 8 deletions(-) diff --git a/lib/dynasm-backend/src/parse.rs b/lib/dynasm-backend/src/parse.rs index 51b17417f9b..daa84a13966 100644 --- a/lib/dynasm-backend/src/parse.rs +++ b/lib/dynasm-backend/src/parse.rs @@ -288,18 +288,10 @@ pub fn read_module< fcg.feed_local(ty, count as usize)?; } fcg.begin_body()?; - let mut last_is_return = false; for op in item.get_operators_reader()? { let op = op?; - last_is_return = match op { - Operator::Return => true, - _ => false, - }; fcg.feed_opcode(op)?; } - if !last_is_return { - fcg.feed_opcode(Operator::Return)?; - } fcg.finalize()?; } } From 63b3f41f05d1a0e0b18ce0aca02d3b39ee1572e0 Mon Sep 17 00:00:00 2001 From: losfair Date: Thu, 21 Feb 2019 21:14:10 +0800 Subject: [PATCH 018/100] Handle unreachable and fix return. 
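Code that follows `return` (and, later, `br`/`unreachable`) is dead but still has to be parsed, so the generator keeps an `unreachable_depth` counter: set to 1 when the terminator is emitted, incremented for every nested `block`/`loop`/`if`, decremented on `end`, and only the `end` that brings it back to 0 is processed normally. A stand-alone model of that skip logic, with a stand-in opcode enum:

    #[derive(Debug, PartialEq)]
    enum Op { Block, Loop, If, End, Return, Nop }

    // Returns true if `op` must be skipped because it sits inside dead code.
    fn skip_unreachable(depth: &mut usize, op: &Op) -> bool {
        if *depth == 0 {
            return false; // reachable code, process normally
        }
        match op {
            Op::Block | Op::Loop | Op::If => *depth += 1,
            Op::End => *depth -= 1,
            _ => {}
        }
        *depth > 0 // the End that reaches 0 is processed; everything before it is skipped
    }

    fn main() {
        let mut depth = 1; // as set right after emitting a Return
        let body = [Op::Block, Op::Nop, Op::End, Op::End, Op::Nop];
        let skipped: Vec<bool> = body.iter().map(|op| skip_unreachable(&mut depth, op)).collect();
        assert_eq!(skipped, [true, true, true, false, false]);
    }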
--- lib/dynasm-backend/src/codegen_x64.rs | 100 +++++++++++++++++++------- 1 file changed, 74 insertions(+), 26 deletions(-) diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index ebb675d45a2..2bcaa5d3313 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -86,6 +86,7 @@ pub struct X64FunctionCode { current_stack_offset: usize, value_stack: ValueStack, control_stack: Option, + unreachable_depth: usize, } pub struct X64ExecutionContext { @@ -182,6 +183,7 @@ impl ModuleCodeGenerator for X64ModuleCode current_stack_offset: 0, value_stack: ValueStack::new(4), control_stack: None, + unreachable_depth: 0, }; self.functions.push(code); Ok(self.functions.last_mut().unwrap()) @@ -516,6 +518,38 @@ impl X64FunctionCode { Ok(()) } + + fn emit_return( + assembler: &mut Assembler, + value_stack: &mut ValueStack, + returns: &Vec, + ) -> Result<(), CodegenError> { + match returns.len() { + 0 => {} + 1 => { + if value_stack.values.iter().last().map(|x| x.ty) != Some(returns[0]) { + return Err(CodegenError { + message: "self.value_stack.last().cloned() != Some(self.returns[0])", + }); + } + Self::emit_pop_into_ax(assembler, value_stack)?; + } + _ => { + return Err(CodegenError { + message: "multiple return values is not yet supported", + }) + } + } + + dynasm!( + assembler + ; mov rsp, rbp + ; pop rbp + ; ret + ); + + Ok(()) + } } impl FunctionCodeGenerator for X64FunctionCode { @@ -600,7 +634,28 @@ impl FunctionCodeGenerator for X64FunctionCode { Ok(()) } fn feed_opcode(&mut self, op: Operator) -> Result<(), CodegenError> { + let was_unreachable; + + if self.unreachable_depth > 0 { + was_unreachable = true; + match op { + Operator::Block { .. } | Operator::Loop { .. } | Operator::If { .. 
} => { + self.unreachable_depth += 1; + } + Operator::End => { + self.unreachable_depth -= 1; + } + _ => {} + } + if self.unreachable_depth > 0 { + return Ok(()); + } + } else { + was_unreachable = false; + } + let assembler = self.assembler.as_mut().unwrap(); + match op { Operator::GetLocal { local_index } => { let local_index = local_index as usize; @@ -751,35 +806,25 @@ impl FunctionCodeGenerator for X64FunctionCode { let info = self.value_stack.pop()?; Self::gen_rt_pop(assembler, &info)?; } - Operator::Return => match self.returns.len() { - 0 => {} - 1 => { - if self.value_stack.values.iter().last().map(|x| x.ty) != Some(self.returns[0]) - { - return Err(CodegenError { - message: "self.value_stack.last().cloned() != Some(self.returns[0])", - }); - } - Self::emit_pop_into_ax(assembler, &mut self.value_stack)?; + Operator::Return => { + Self::emit_return(assembler, &mut self.value_stack, &self.returns)?; + self.unreachable_depth = 1; + } + Operator::End => { + if self.control_stack.as_ref().unwrap().frames.len() == 1 { + let frame = self.control_stack.as_mut().unwrap().frames.pop().unwrap(); + Self::emit_leave_frame(assembler, &frame, &mut self.value_stack)?; dynasm!( assembler - ; mov rsp, rbp - ; pop rbp - ; ret + ; =>frame.label ); + } else { + Self::emit_block_end( + assembler, + self.control_stack.as_mut().unwrap(), + &mut self.value_stack, + )?; } - _ => { - return Err(CodegenError { - message: "multiple return values is not yet supported", - }) - } - }, - Operator::End => { - Self::emit_block_end( - assembler, - self.control_stack.as_mut().unwrap(), - &mut self.value_stack, - )?; } Operator::Br { relative_depth } => { Self::emit_jmp( @@ -788,6 +833,7 @@ impl FunctionCodeGenerator for X64FunctionCode { &mut self.value_stack, relative_depth as usize, )?; + self.unreachable_depth = 1; } _ => unimplemented!(), } @@ -799,7 +845,9 @@ impl FunctionCodeGenerator for X64FunctionCode { dynasm!( assembler - ; ud2 + ; mov rsp, rbp + ; pop rbp + ; ret ); if self.value_stack.values.len() != 0 From 7c439932f119d050f96a0f13dadec103db439314 Mon Sep 17 00:00:00 2001 From: losfair Date: Thu, 21 Feb 2019 22:04:43 +0800 Subject: [PATCH 019/100] Block and conditional branch. --- lib/dynasm-backend/src/codegen_x64.rs | 96 +++++++++++++++++++++++++-- 1 file changed, 90 insertions(+), 6 deletions(-) diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index 2bcaa5d3313..8f839a9df90 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -170,7 +170,7 @@ impl ModuleCodeGenerator for X64ModuleCode ; => begin_label ; push rbp ; mov rbp, rsp - ; int 3 + //; int 3 ); let code = X64FunctionCode { id: self.functions.len(), @@ -351,6 +351,44 @@ impl X64FunctionCode { } } + fn emit_peek_into_ax( + assembler: &mut Assembler, + value_stack: &ValueStack, + ) -> Result<(), CodegenError> { + let val = match value_stack.values.last() { + Some(x) => *x, + None => { + return Err(CodegenError { + message: "no value", + }) + } + }; + match val.location { + ValueLocation::Register(x) => { + let reg = Register::from_scratch_reg(x); + dynasm!( + assembler + ; mov rax, Rq(reg as u8) + ); + } + ValueLocation::Stack => { + if is_dword(get_size_of_type(&val.ty)?) 
{ + dynasm!( + assembler + ; mov eax, [rsp] + ); + } else { + dynasm!( + assembler + ; mov rax, [rsp] + ); + } + } + } + + Ok(()) + } + fn emit_pop_into_ax( assembler: &mut Assembler, value_stack: &mut ValueStack, @@ -388,6 +426,7 @@ impl X64FunctionCode { assembler: &mut Assembler, frame: &ControlFrame, value_stack: &mut ValueStack, + peek: bool, ) -> Result<(), CodegenError> { let ret_ty = match frame.returns.len() { 1 => Some(frame.returns[0]), @@ -417,7 +456,11 @@ impl X64FunctionCode { message: "value type != return type", }); } - Self::emit_pop_into_ax(assembler, value_stack)?; + if peek { + Self::emit_peek_into_ax(assembler, value_stack)?; + } else { + Self::emit_pop_into_ax(assembler, value_stack)?; + } } Ok(()) @@ -427,6 +470,7 @@ impl X64FunctionCode { assembler: &mut Assembler, control_stack: &mut ControlStack, value_stack: &mut ValueStack, + was_unreachable: bool, ) -> Result<(), CodegenError> { let frame = match control_stack.frames.pop() { Some(x) => x, @@ -437,7 +481,9 @@ impl X64FunctionCode { } }; - Self::emit_leave_frame(assembler, &frame, value_stack)?; + if !was_unreachable { + Self::emit_leave_frame(assembler, &frame, value_stack, false)?; + } if value_stack.values.len() != frame.value_stack_depth_before { return Err(CodegenError { @@ -497,7 +543,7 @@ impl X64FunctionCode { &control_stack.frames[control_stack.frames.len() - 1 - relative_frame_offset] }; - Self::emit_leave_frame(assembler, frame, value_stack)?; + Self::emit_leave_frame(assembler, frame, value_stack, true)?; let mut sp_diff: usize = 0; @@ -801,7 +847,21 @@ impl FunctionCodeGenerator for X64FunctionCode { }, )?; } - Operator::Block { ty } => {} + Operator::Block { ty } => { + self.control_stack + .as_mut() + .unwrap() + .frames + .push(ControlFrame { + label: assembler.new_dynamic_label(), + loop_like: false, + returns: match ty { + WpType::EmptyBlockType => vec![], + _ => vec![ty], + }, + value_stack_depth_before: self.value_stack.values.len(), + }); + } Operator::Drop => { let info = self.value_stack.pop()?; Self::gen_rt_pop(assembler, &info)?; @@ -813,7 +873,11 @@ impl FunctionCodeGenerator for X64FunctionCode { Operator::End => { if self.control_stack.as_ref().unwrap().frames.len() == 1 { let frame = self.control_stack.as_mut().unwrap().frames.pop().unwrap(); - Self::emit_leave_frame(assembler, &frame, &mut self.value_stack)?; + + if !was_unreachable { + Self::emit_leave_frame(assembler, &frame, &mut self.value_stack, false)?; + } + dynasm!( assembler ; =>frame.label @@ -823,6 +887,7 @@ impl FunctionCodeGenerator for X64FunctionCode { assembler, self.control_stack.as_mut().unwrap(), &mut self.value_stack, + was_unreachable, )?; } } @@ -835,6 +900,25 @@ impl FunctionCodeGenerator for X64FunctionCode { )?; self.unreachable_depth = 1; } + Operator::BrIf { relative_depth } => { + let no_br_label = assembler.new_dynamic_label(); + Self::emit_pop_into_ax(assembler, &mut self.value_stack)?; // TODO: typeck? + dynasm!( + assembler + ; cmp eax, 0 + ; je =>no_br_label + ); + Self::emit_jmp( + assembler, + self.control_stack.as_ref().unwrap(), + &mut self.value_stack, + relative_depth as usize, + )?; + dynasm!( + assembler + ; =>no_br_label + ); + } _ => unimplemented!(), } Ok(()) From 08a2ec82b36c29a5f222cf4a0ce1c68b101c3c77 Mon Sep 17 00:00:00 2001 From: losfair Date: Sat, 23 Feb 2019 01:54:16 +0800 Subject: [PATCH 020/100] Unary operators, comparison opcodes, loops, etc. 
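`I32Eq` and `I32Eqz` read the zero flag without a branch: `lahf` copies the low byte of FLAGS into AH, which places ZF at bit 14 of AX, so `shr ax, 14` followed by `and eax, 1` leaves exactly the equality result in EAX. A model of that bit extraction (flag layout as documented for `lahf`; the function name is illustrative):

    // lahf: AH <- SF:ZF:0:AF:0:PF:1:CF (the low byte of FLAGS).
    // ZF is bit 6 of FLAGS, so it ends up at bit 14 of AX.
    fn eq_result_from_flags(flags: u16) -> u32 {
        let ah = flags & 0x00ff;  // what lahf copies
        let ax = ah << 8;         // AH occupies bits 15..8 of AX
        u32::from((ax >> 14) & 1) // shr ax, 14 ; and eax, 1
    }

    fn main() {
        let zf = 1 << 6;
        assert_eq!(eq_result_from_flags(zf), 1); // operands were equal
        assert_eq!(eq_result_from_flags(0), 0);  // operands differed
    }

The ordered comparisons (`lt`, `le`, `gt`, `ge`) take the other route in `emit_cmp_i32` and use a conditional jump to materialize 0 or 1.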
--- lib/dynasm-backend/src/codegen_x64.rs | 319 ++++++++++++++++++++++++-- 1 file changed, 303 insertions(+), 16 deletions(-) diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index 8f839a9df90..942c071d10b 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -115,6 +115,14 @@ impl ProtectedCaller for X64ExecutionContext { } match self.functions[index].num_params { + 0 => unsafe { + let ptr: extern "C" fn() -> i64 = ::std::mem::transmute(ptr); + Ok(vec![Value::I32(ptr() as i32)]) + }, + 1 => unsafe { + let ptr: extern "C" fn(i64) -> i64 = ::std::mem::transmute(ptr); + Ok(vec![Value::I32(ptr(value_to_i64(&_params[0])) as i32)]) + }, 2 => unsafe { let ptr: extern "C" fn(i64, i64) -> i64 = ::std::mem::transmute(ptr); Ok(vec![Value::I32( @@ -221,6 +229,41 @@ impl X64FunctionCode { Ok(()) } + /// Emits a unary operator. + fn emit_unop_i32( + assembler: &mut Assembler, + value_stack: &mut ValueStack, + f: F, + ) -> Result<(), CodegenError> { + let a = value_stack.pop()?; + if a.ty != WpType::I32 { + return Err(CodegenError { + message: "unop(i32) type mismatch", + }); + } + value_stack.push(WpType::I32); + + match a.location { + ValueLocation::Register(x) => { + let reg = Register::from_scratch_reg(x); + f(assembler, value_stack, reg); + } + ValueLocation::Stack => { + dynasm!( + assembler + ; mov eax, [rsp] + ); + f(assembler, value_stack, Register::RAX); + dynasm!( + assembler + ; mov [rsp], eax + ); + } + } + + Ok(()) + } + /// Emits a binary operator. /// /// Guarantees that the first Register parameter to callback `f` will never be `Register::RAX`. @@ -232,7 +275,7 @@ impl X64FunctionCode { let (a, b) = value_stack.pop2()?; if a.ty != WpType::I32 || b.ty != WpType::I32 { return Err(CodegenError { - message: "I32Add type mismatch", + message: "binop(i32) type mismatch", }); } value_stack.push(WpType::I32); @@ -351,6 +394,27 @@ impl X64FunctionCode { } } + fn emit_cmp_i32( + assembler: &mut Assembler, + left: Register, + right: Register, + f: F, + ) { + dynasm!( + assembler + ; cmp Rd(left as u8), Rd(right as u8) + ); + f(assembler); + dynasm!( + assembler + ; xor Rd(left as u8), Rd(left as u8) + ; jmp >label_end + ; label_true: + ; mov Rd(left as u8), 1 + ; label_end: + ); + } + fn emit_peek_into_ax( assembler: &mut Assembler, value_stack: &ValueStack, @@ -392,7 +456,7 @@ impl X64FunctionCode { fn emit_pop_into_ax( assembler: &mut Assembler, value_stack: &mut ValueStack, - ) -> Result<(), CodegenError> { + ) -> Result { let val = value_stack.pop()?; match val.location { ValueLocation::Register(x) => { @@ -419,7 +483,7 @@ impl X64FunctionCode { } } - Ok(()) + Ok(val.ty) } fn emit_leave_frame( @@ -438,12 +502,6 @@ impl X64FunctionCode { } }; - if ret_ty.is_some() && frame.loop_like { - return Err(CodegenError { - message: "return value is not supported for loops", - }); - } - if value_stack.values.len() < frame.value_stack_depth_before + frame.returns.len() { return Err(CodegenError { message: "value stack underflow", @@ -483,12 +541,14 @@ impl X64FunctionCode { if !was_unreachable { Self::emit_leave_frame(assembler, &frame, value_stack, false)?; - } - - if value_stack.values.len() != frame.value_stack_depth_before { - return Err(CodegenError { - message: "value_stack.values.len() != frame.value_stack_depth_before", - }); + if value_stack.values.len() != frame.value_stack_depth_before { + return Err(CodegenError { + message: "value_stack.values.len() != frame.value_stack_depth_before", + }); + } + } else { + 
// No need to actually unwind the stack here. + value_stack.reset_depth(frame.value_stack_depth_before); } if !frame.loop_like { @@ -543,7 +603,9 @@ impl X64FunctionCode { &control_stack.frames[control_stack.frames.len() - 1 - relative_frame_offset] }; - Self::emit_leave_frame(assembler, frame, value_stack, true)?; + if !frame.loop_like { + Self::emit_leave_frame(assembler, frame, value_stack, true)?; + } let mut sp_diff: usize = 0; @@ -747,6 +809,52 @@ impl FunctionCodeGenerator for X64FunctionCode { } } } + Operator::SetLocal { local_index } => { + let local_index = local_index as usize; + if local_index >= self.locals.len() { + return Err(CodegenError { + message: "local out of bounds", + }); + } + let local = self.locals[local_index]; + let ty = Self::emit_pop_into_ax(assembler, &mut self.value_stack)?; + if ty != local.ty { + return Err(CodegenError { + message: "SetLocal type mismatch", + }); + } + + if is_dword(get_size_of_type(&ty)?) { + dynasm!( + assembler + ; mov [rbp - (local.stack_offset as i32)], eax + ); + } else { + dynasm!( + assembler + ; mov [rbp - (local.stack_offset as i32)], rax + ); + } + } + Operator::I32Const { value } => { + let location = self.value_stack.push(WpType::I32); + match location { + ValueLocation::Register(x) => { + let reg = Register::from_scratch_reg(x); + dynasm!( + assembler + ; mov Rq(reg as u8), value + ); + } + ValueLocation::Stack => { + dynasm!( + assembler + ; sub rsp, 4 + ; mov DWORD [rsp], value + ); + } + } + } Operator::I32Add => { Self::emit_binop_i32( assembler, @@ -847,6 +955,158 @@ impl FunctionCodeGenerator for X64FunctionCode { }, )?; } + Operator::I32Eq => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + dynasm!( + assembler + ; cmp Rd(left as u8), Rd(right as u8) + ; lahf + ; shr ax, 14 + ; and eax, 1 + ; mov Rd(left as u8), eax + ); + }, + )?; + } + Operator::I32Eqz => { + Self::emit_unop_i32( + assembler, + &mut self.value_stack, + |assembler, value_stack, reg| { + dynasm!( + assembler + ; cmp Rd(reg as u8), 0 + ; lahf + ; shr ax, 14 + ; and eax, 1 + ); + if reg != Register::RAX { + dynasm!( + assembler + ; mov Rd(reg as u8), eax + ); + } + }, + )?; + } + // Comparison operators. + // https://en.wikibooks.org/wiki/X86_Assembly/Control_Flow + // TODO: Is reading flag register directly faster? 
+ Operator::I32LtS => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_cmp_i32(assembler, left, right, |assembler| { + dynasm!( + assembler + ; jl >label_true + ); + }); + }, + )?; + } + Operator::I32LeS => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_cmp_i32(assembler, left, right, |assembler| { + dynasm!( + assembler + ; jle >label_true + ); + }); + }, + )?; + } + Operator::I32GtS => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_cmp_i32(assembler, left, right, |assembler| { + dynasm!( + assembler + ; jg >label_true + ); + }); + }, + )?; + } + Operator::I32GeS => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_cmp_i32(assembler, left, right, |assembler| { + dynasm!( + assembler + ; jge >label_true + ); + }); + }, + )?; + } + Operator::I32LtU => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_cmp_i32(assembler, left, right, |assembler| { + dynasm!( + assembler + ; jb >label_true + ); + }); + }, + )?; + } + Operator::I32LeU => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_cmp_i32(assembler, left, right, |assembler| { + dynasm!( + assembler + ; jbe >label_true + ); + }); + }, + )?; + } + Operator::I32GtU => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_cmp_i32(assembler, left, right, |assembler| { + dynasm!( + assembler + ; ja >label_true + ); + }); + }, + )?; + } + Operator::I32GeU => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_cmp_i32(assembler, left, right, |assembler| { + dynasm!( + assembler + ; jae >label_true + ); + }); + }, + )?; + } Operator::Block { ty } => { self.control_stack .as_mut() @@ -862,6 +1122,13 @@ impl FunctionCodeGenerator for X64FunctionCode { value_stack_depth_before: self.value_stack.values.len(), }); } + Operator::Unreachable => { + dynasm!( + assembler + ; ud2 + ); + self.unreachable_depth = 1; + } Operator::Drop => { let info = self.value_stack.pop()?; Self::gen_rt_pop(assembler, &info)?; @@ -891,6 +1158,26 @@ impl FunctionCodeGenerator for X64FunctionCode { )?; } } + Operator::Loop { ty } => { + let label = assembler.new_dynamic_label(); + self.control_stack + .as_mut() + .unwrap() + .frames + .push(ControlFrame { + label: label, + loop_like: true, + returns: match ty { + WpType::EmptyBlockType => vec![], + _ => vec![ty], + }, + value_stack_depth_before: self.value_stack.values.len(), + }); + dynasm!( + assembler + ; =>label + ); + } Operator::Br { relative_depth } => { Self::emit_jmp( assembler, From 693c9fd2abbdcf9710d77b32b33dbd0e041d6f78 Mon Sep 17 00:00:00 2001 From: losfair Date: Sat, 23 Feb 2019 01:54:45 +0800 Subject: [PATCH 021/100] Single-pass backend tests. 
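i32.wat drives the comparison opcodes through `br_if`, and loop.wat runs a back-edge 100000 times while accumulating a sum. For reference, the loop test computes the following (plain Rust, using wasm's wrapping i32 arithmetic), so a correct run of `main` should return 705082704:

    // Same computation as examples/single_pass_tests/loop.wat.
    fn main() {
        let mut count: i32 = 0;
        let mut sum: i32 = 0;
        loop {
            count = count.wrapping_add(1);
            sum = sum.wrapping_add(count);
            if count == 100_000 {
                break;
            }
        }
        assert_eq!(sum, 705_082_704); // 5_000_050_000 truncated to 32 bits
        println!("expected loop.wat result: {}", sum);
    }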
--- examples/single_pass_tests/i32.wat | 44 +++++++++++++++++++++++++++++ examples/single_pass_tests/loop.wat | 16 +++++++++++ 2 files changed, 60 insertions(+) create mode 100644 examples/single_pass_tests/i32.wat create mode 100644 examples/single_pass_tests/loop.wat diff --git a/examples/single_pass_tests/i32.wat b/examples/single_pass_tests/i32.wat new file mode 100644 index 00000000000..66daadcc9c8 --- /dev/null +++ b/examples/single_pass_tests/i32.wat @@ -0,0 +1,44 @@ +(module + (func $main (export "main") (result i32) + (local $v1 i32) + (block + (i32.const 10) + (set_local $v1) + + (i32.const 42) + (get_local $v1) + (i32.add) + (i32.const 53) + (i32.eq) + (br_if 0) + + (i32.const 1) + (i32.const -100) + (i32.const 41) + (i32.lt_s) + (i32.sub) + (br_if 0) + + (i32.const -100) + (i32.const 41) + (i32.lt_u) + (br_if 0) + + (i32.const 1) + (i32.const 100) + (i32.const -41) + (i32.gt_s) + (i32.sub) + (br_if 0) + + (i32.const 100) + (i32.const -41) + (i32.gt_u) + (br_if 0) + + (i32.const 0) + (return) + ) + (unreachable) + ) +) diff --git a/examples/single_pass_tests/loop.wat b/examples/single_pass_tests/loop.wat new file mode 100644 index 00000000000..dfdc1b1d58e --- /dev/null +++ b/examples/single_pass_tests/loop.wat @@ -0,0 +1,16 @@ +(module + (func $main (export "main") (result i32) + (local $count i32) + (local $sum i32) + (loop (result i32) + (set_local $count (i32.add (get_local $count) (i32.const 1))) + (set_local $sum (i32.add (get_local $sum) (get_local $count))) + (i32.sub (i32.const 1) (i32.eq + (get_local $count) + (i32.const 100000) + )) + (br_if 0) + (get_local $sum) + ) + ) +) From e9c03257845d0573d711c9529bd7382df28cb7f3 Mon Sep 17 00:00:00 2001 From: losfair Date: Sun, 24 Feb 2019 00:51:47 +0800 Subject: [PATCH 022/100] Update dependencies --- Cargo.lock | 2 ++ lib/dynasm-backend/Cargo.toml | 2 ++ 2 files changed, 4 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 5318d9effe4..4776d7f428f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -871,8 +871,10 @@ dependencies = [ name = "wasmer-dynasm-backend" version = "0.1.0" dependencies = [ + "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "dynasm 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "dynasmrt 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "wasmer-runtime-core 0.1.2", "wasmparser 0.28.0 (registry+https://github.com/rust-lang/crates.io-index)", ] diff --git a/lib/dynasm-backend/Cargo.toml b/lib/dynasm-backend/Cargo.toml index 005e232bb1d..26ff42a8dec 100644 --- a/lib/dynasm-backend/Cargo.toml +++ b/lib/dynasm-backend/Cargo.toml @@ -12,3 +12,5 @@ wasmer-runtime-core = { path = "../runtime-core" } wasmparser = "0.28.0" dynasm = "0.3.0" dynasmrt = "0.3.1" +lazy_static = "1.2.0" +byteorder = "1" From 09cbd4aeb0f48324fa96cb3ef612f39e855889a0 Mon Sep 17 00:00:00 2001 From: losfair Date: Sun, 24 Feb 2019 00:52:32 +0800 Subject: [PATCH 023/100] Changed to using custom calling conventions; Implemented direct calls. 
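This commit switches to a custom calling convention: ProtectedCaller::call serializes the Wasm arguments into a flat little-endian buffer (using the byteorder crate added in the previous commit) and hands that buffer to the CALL_WASM trampoline. A minimal sketch of the packing step only, restricted to i32 parameters and written against std's to_le_bytes rather than byteorder; the helper name and values are illustrative:

    fn pack_i32_params(params: &[i32]) -> Vec<u8> {
        // Each parameter is written little-endian at consecutive offsets,
        // mirroring the LittleEndian::write_u32 calls in ProtectedCaller::call.
        let mut buf = Vec::with_capacity(params.len() * 4);
        for p in params {
            buf.extend_from_slice(&p.to_le_bytes());
        }
        buf
    }

    fn main() {
        assert_eq!(pack_i32_params(&[10, 42]), vec![10, 0, 0, 0, 42, 0, 0, 0]);
    }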
--- lib/dynasm-backend/src/codegen.rs | 16 +- lib/dynasm-backend/src/codegen_x64.rs | 403 ++++++++++++++++++++++---- lib/dynasm-backend/src/lib.rs | 5 + lib/dynasm-backend/src/parse.rs | 4 + 4 files changed, 371 insertions(+), 57 deletions(-) diff --git a/lib/dynasm-backend/src/codegen.rs b/lib/dynasm-backend/src/codegen.rs index 18e67f5e17f..8257ceefebb 100644 --- a/lib/dynasm-backend/src/codegen.rs +++ b/lib/dynasm-backend/src/codegen.rs @@ -1,9 +1,23 @@ -use wasmer_runtime_core::backend::ProtectedCaller; +use std::sync::Arc; +use wasmer_runtime_core::{ + backend::ProtectedCaller, + structures::Map, + types::{FuncIndex, FuncSig, SigIndex}, + units::Pages, +}; use wasmparser::{Operator, Type as WpType}; pub trait ModuleCodeGenerator { fn next_function(&mut self) -> Result<&mut FCG, CodegenError>; fn finalize(self) -> Result; + fn feed_signatures( + &mut self, + signatures: Map>, + ) -> Result<(), CodegenError>; + fn feed_function_signatures( + &mut self, + assoc: Map, + ) -> Result<(), CodegenError>; } pub trait FunctionCodeGenerator { diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index 942c071d10b..6949dceb77f 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -1,8 +1,10 @@ use super::codegen::*; use super::stack::{ControlFrame, ControlStack, ValueInfo, ValueLocation, ValueStack}; +use byteorder::{ByteOrder, LittleEndian}; use dynasmrt::{ x64::Assembler, AssemblyOffset, DynamicLabel, DynasmApi, DynasmLabelApi, ExecutableBuffer, }; +use std::{collections::HashMap, sync::Arc}; use wasmer_runtime_core::{ backend::{Backend, Compiler, FuncResolver, ProtectedCaller, Token, UserTrapper}, error::{CompileError, CompileResult, RuntimeError, RuntimeResult}, @@ -16,6 +18,40 @@ use wasmer_runtime_core::{ }; use wasmparser::{Operator, Type as WpType}; +lazy_static! 
{ + static ref CALL_WASM: unsafe extern "C" fn(params: *const u8, params_len: usize, target: *const u8) -> i64 = { + let mut assembler = Assembler::new().unwrap(); + let offset = assembler.offset(); + dynasm!( + assembler + ; lea rax, [>after_call] + ; push rax + ; push rbp + ; mov rbp, rsp + ; sub rsp, rsi // params_len + ; mov rcx, 0 + ; mov r8, rsp + ; _loop: + ; cmp rsi, 0 + ; je >_loop_end + ; mov eax, [rdi] + ; mov [r8], eax + ; add r8, 4 + ; add rdi, 4 + ; sub rsi, 4 + ; jmp <_loop + ; _loop_end: + ; jmp rdx + ; after_call: + ; ret + ); + let buf = assembler.finalize().unwrap(); + let ret = unsafe { ::std::mem::transmute(buf.ptr(offset)) }; + ::std::mem::forget(buf); + ret + }; +} + #[repr(u8)] #[derive(Copy, Clone, Debug, Eq, PartialEq)] pub enum Register { @@ -73,13 +109,19 @@ impl Register { #[derive(Default)] pub struct X64ModuleCodeGenerator { functions: Vec, + signatures: Option>>>, + function_signatures: Option>>, } pub struct X64FunctionCode { + signatures: Arc>>, + function_signatures: Arc>, + id: usize, begin_label: DynamicLabel, begin_offset: AssemblyOffset, assembler: Option, + function_labels: Option>, returns: Vec, locals: Vec, num_params: usize, @@ -114,23 +156,48 @@ impl ProtectedCaller for X64ExecutionContext { }); } - match self.functions[index].num_params { - 0 => unsafe { - let ptr: extern "C" fn() -> i64 = ::std::mem::transmute(ptr); - Ok(vec![Value::I32(ptr() as i32)]) - }, - 1 => unsafe { - let ptr: extern "C" fn(i64) -> i64 = ::std::mem::transmute(ptr); - Ok(vec![Value::I32(ptr(value_to_i64(&_params[0])) as i32)]) - }, - 2 => unsafe { - let ptr: extern "C" fn(i64, i64) -> i64 = ::std::mem::transmute(ptr); - Ok(vec![Value::I32( - ptr(value_to_i64(&_params[0]), value_to_i64(&_params[1])) as i32, - )]) - }, - _ => unimplemented!(), + let f = &self.functions[index]; + let mut total_size: usize = 0; + + for local in &f.locals[0..f.num_params] { + total_size += get_size_of_type(&local.ty).unwrap(); } + + let mut param_buf: Vec = vec![0; total_size]; + for i in 0..f.num_params { + let local = &f.locals[i]; + let buf = &mut param_buf[total_size - local.stack_offset..]; + let size = get_size_of_type(&local.ty).unwrap(); + + if is_dword(size) { + match _params[i] { + Value::I32(x) => LittleEndian::write_u32(buf, x as u32), + Value::F32(x) => LittleEndian::write_u32(buf, f32::to_bits(x)), + _ => { + return Err(RuntimeError::User { + msg: "signature mismatch".into(), + }) + } + } + } else { + match _params[i] { + Value::I64(x) => LittleEndian::write_u64(buf, x as u64), + Value::F64(x) => LittleEndian::write_u64(buf, f64::to_bits(x)), + _ => { + return Err(RuntimeError::User { + msg: "signature mismatch".into(), + }) + } + } + } + } + + let ret = unsafe { CALL_WASM(param_buf.as_ptr(), param_buf.len(), ptr) }; + Ok(if let Some(ty) = return_ty { + vec![Value::I64(ret)] + } else { + vec![] + }) } fn get_early_trapper(&self) -> Box { @@ -160,31 +227,41 @@ impl X64ModuleCodeGenerator { impl ModuleCodeGenerator for X64ModuleCodeGenerator { fn next_function(&mut self) -> Result<&mut X64FunctionCode, CodegenError> { - let mut assembler = match self.functions.last_mut() { - Some(x) => x.assembler.take().unwrap(), - None => match Assembler::new() { - Ok(x) => x, - Err(_) => { - return Err(CodegenError { - message: "cannot initialize assembler", - }) - } - }, + let (mut assembler, mut function_labels) = match self.functions.last_mut() { + Some(x) => ( + x.assembler.take().unwrap(), + x.function_labels.take().unwrap(), + ), + None => ( + match Assembler::new() { + Ok(x) => x, + 
Err(_) => { + return Err(CodegenError { + message: "cannot initialize assembler", + }) + } + }, + HashMap::new(), + ), }; - let begin_label = assembler.new_dynamic_label(); + let begin_label = *function_labels + .entry(self.functions.len()) + .or_insert_with(|| assembler.new_dynamic_label()); let begin_offset = assembler.offset(); dynasm!( assembler ; => begin_label - ; push rbp - ; mov rbp, rsp //; int 3 ); let code = X64FunctionCode { + signatures: self.signatures.as_ref().unwrap().clone(), + function_signatures: self.function_signatures.as_ref().unwrap().clone(), + id: self.functions.len(), begin_label: begin_label, begin_offset: begin_offset, assembler: Some(assembler), + function_labels: Some(function_labels), returns: vec![], locals: vec![], num_params: 0, @@ -212,6 +289,22 @@ impl ModuleCodeGenerator for X64ModuleCode functions: self.functions, }) } + + fn feed_signatures( + &mut self, + signatures: Map>, + ) -> Result<(), CodegenError> { + self.signatures = Some(Arc::new(signatures)); + Ok(()) + } + + fn feed_function_signatures( + &mut self, + assoc: Map, + ) -> Result<(), CodegenError> { + self.function_signatures = Some(Arc::new(assoc)); + Ok(()) + } } impl X64FunctionCode { @@ -486,6 +579,40 @@ impl X64FunctionCode { Ok(val.ty) } + fn emit_push_from_ax( + assembler: &mut Assembler, + value_stack: &mut ValueStack, + ty: WpType, + ) -> Result<(), CodegenError> { + let loc = value_stack.push(ty); + match loc { + ValueLocation::Register(x) => { + let reg = Register::from_scratch_reg(x); + dynasm!( + assembler + ; mov Rq(reg as u8), rax + ); + } + ValueLocation::Stack => { + if is_dword(get_size_of_type(&ty)?) { + dynasm!( + assembler + ; sub rsp, 4 + ; mov [rsp], eax + ); + } else { + dynasm!( + assembler + ; sub rsp, 8 + ; mov [rsp], rax + ); + } + } + } + + Ok(()) + } + fn emit_leave_frame( assembler: &mut Assembler, frame: &ControlFrame, @@ -658,6 +785,147 @@ impl X64FunctionCode { Ok(()) } + + fn emit_call_raw( + assembler: &mut Assembler, + value_stack: &mut ValueStack, + target: DynamicLabel, + params: &[WpType], + returns: &[WpType], + ) -> Result<(), CodegenError> { + let mut total_size: usize = 0; + for ty in params { + total_size += get_size_of_type(ty)?; + } + + if params.len() > value_stack.values.len() { + return Err(CodegenError { + message: "value stack underflow in call", + }); + } + + let mut saved_regs: Vec = Vec::new(); + + for v in &value_stack.values[0..value_stack.values.len() - params.len()] { + match v.location { + ValueLocation::Register(x) => { + let reg = Register::from_scratch_reg(x); + dynasm!( + assembler + ; push Rq(reg as u8) + ); + saved_regs.push(reg); + } + ValueLocation::Stack => break, + } + } + + dynasm!( + assembler + ; lea rax, [>after_call] // TODO: Is this correct? 
+ ; push rax + ; push rbp + ); + + if total_size != 0 { + dynasm!( + assembler + ; sub rsp, total_size as i32 + ); + } + + let mut offset: usize = 0; + let mut caller_stack_offset: usize = 0; + for ty in params { + let val = value_stack.pop()?; + if val.ty != *ty { + return Err(CodegenError { + message: "value type mismatch", + }); + } + let size = get_size_of_type(ty)?; + + match val.location { + ValueLocation::Register(x) => { + let reg = Register::from_scratch_reg(x); + if is_dword(size) { + dynasm!( + assembler + ; mov [rsp + offset as i32], Rd(reg as u8) + ); + } else { + dynasm!( + assembler + ; mov [rsp + offset as i32], Rq(reg as u8) + ); + } + } + ValueLocation::Stack => { + if is_dword(size) { + dynasm!( + assembler + ; mov eax, [rsp + (total_size + 16 + caller_stack_offset) as i32] + ; mov [rsp + offset as i32], eax + ); + } else { + dynasm!( + assembler + ; mov rax, [rsp + (total_size + 16 + caller_stack_offset) as i32] + ; mov [rsp + offset as i32], rax + ); + } + caller_stack_offset += size; + } + } + + offset += size; + } + + assert_eq!(offset, total_size); + + dynasm!( + assembler + ; mov rbp, rsp + ); + if total_size != 0 { + dynasm!( + assembler + ; add rbp, total_size as i32 + ); + } + dynasm!( + assembler + ; jmp =>target + ; after_call: + ); + if caller_stack_offset != 0 { + dynasm!( + assembler + ; add rsp, caller_stack_offset as i32 + ); + } + + match returns.len() { + 0 => {} + 1 => { + Self::emit_push_from_ax(assembler, value_stack, returns[0])?; + } + _ => { + return Err(CodegenError { + message: "more than 1 function returns are not supported", + }) + } + } + + for reg in saved_regs.iter().rev() { + dynasm!( + assembler + ; pop Rq(*reg as u8) + ); + } + + Ok(()) + } } impl FunctionCodeGenerator for X64FunctionCode { @@ -666,6 +934,11 @@ impl FunctionCodeGenerator for X64FunctionCode { Ok(()) } + /// Stack layout of a call frame: + /// - Return address + /// - Old RBP + /// - Params in reversed order, caller initialized + /// - Locals in reversed order, callee initialized fn feed_param(&mut self, ty: WpType) -> Result<(), CodegenError> { let assembler = self.assembler.as_mut().unwrap(); let size = get_size_of_type(&ty)?; @@ -676,35 +949,8 @@ impl FunctionCodeGenerator for X64FunctionCode { stack_offset: self.current_stack_offset, }); - let param_reg = match self.num_params { - 0 => Register::RDI, - 1 => Register::RSI, - 2 => Register::RDX, - 3 => Register::RCX, - 4 => Register::R8, - 5 => Register::R9, - _ => { - return Err(CodegenError { - message: "more than 6 function parameters is not yet supported", - }) - } - }; self.num_params += 1; - if is_dword(size) { - dynasm!( - assembler - ; sub rsp, 4 - ; mov [rsp], Rd(param_reg as u8) - ); - } else { - dynasm!( - assembler - ; sub rsp, 8 - ; mov [rsp], Rq(param_reg as u8) - ); - } - Ok(()) } @@ -1137,6 +1383,42 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_return(assembler, &mut self.value_stack, &self.returns)?; self.unreachable_depth = 1; } + Operator::Call { function_index } => { + let function_index = function_index as usize; + let label = *self + .function_labels + .as_mut() + .unwrap() + .entry(function_index) + .or_insert_with(|| assembler.new_dynamic_label()); + let sig_index = match self.function_signatures.get(FuncIndex::new(function_index)) { + Some(x) => *x, + None => { + return Err(CodegenError { + message: "signature not found", + }) + } + }; + let sig = match self.signatures.get(sig_index) { + Some(x) => x, + None => { + return Err(CodegenError { + message: "signature does not exist", 
+ }) + } + }; + let param_types: Vec = + sig.params().iter().cloned().map(type_to_wp_type).collect(); + let return_types: Vec = + sig.returns().iter().cloned().map(type_to_wp_type).collect(); + Self::emit_call_raw( + assembler, + &mut self.value_stack, + label, + ¶m_types, + &return_types, + )?; + } Operator::End => { if self.control_stack.as_ref().unwrap().frames.len() == 1 { let frame = self.control_stack.as_mut().unwrap().frames.pop().unwrap(); @@ -1255,3 +1537,12 @@ fn value_to_i64(v: &Value) -> i64 { Value::I64(x) => x as u64 as i64, } } + +fn type_to_wp_type(ty: Type) -> WpType { + match ty { + Type::I32 => WpType::I32, + Type::I64 => WpType::I64, + Type::F32 => WpType::F32, + Type::F64 => WpType::F64, + } +} diff --git a/lib/dynasm-backend/src/lib.rs b/lib/dynasm-backend/src/lib.rs index f7f751c371f..a17e6e48b5e 100644 --- a/lib/dynasm-backend/src/lib.rs +++ b/lib/dynasm-backend/src/lib.rs @@ -6,6 +6,11 @@ extern crate dynasmrt; #[macro_use] extern crate dynasm; +#[macro_use] +extern crate lazy_static; + +extern crate byteorder; + mod codegen; mod codegen_x64; mod parse; diff --git a/lib/dynasm-backend/src/parse.rs b/lib/dynasm-backend/src/parse.rs index daa84a13966..87a0e213f3e 100644 --- a/lib/dynasm-backend/src/parse.rs +++ b/lib/dynasm-backend/src/parse.rs @@ -89,6 +89,8 @@ pub fn read_module< let ty = ty?; info.signatures.push(Arc::new(func_type_to_func_sig(ty)?)); } + + mcg.feed_signatures(info.signatures.clone())?; } SectionCode::Import => { let import_reader = section.get_import_section_reader()?; @@ -148,6 +150,8 @@ pub fn read_module< let sigindex = SigIndex::new(sigindex as usize); info.func_assoc.push(sigindex); } + + mcg.feed_function_signatures(info.func_assoc.clone())?; } SectionCode::Table => { let table_decl_reader = section.get_table_section_reader()?; From b2f5f770941b13806d1f53e9ddcfdd1b946ca914 Mon Sep 17 00:00:00 2001 From: losfair Date: Sun, 24 Feb 2019 00:52:50 +0800 Subject: [PATCH 024/100] Add direct call test --- examples/single_pass_tests/call.wat | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 examples/single_pass_tests/call.wat diff --git a/examples/single_pass_tests/call.wat b/examples/single_pass_tests/call.wat new file mode 100644 index 00000000000..2986ac11a47 --- /dev/null +++ b/examples/single_pass_tests/call.wat @@ -0,0 +1,23 @@ +(module + (func $main (export "main") + (local $a i32) + (block + (set_local $a (i32.const 33)) + (i32.const 11) + (call $foo (get_local $a)) + (i32.add) + (i32.const 86) + (i32.eq) + (br_if 0) + (unreachable) + ) + ) + + (func $foo (param $input i32) (result i32) + (local $a i32) + (set_local $a (i32.const 42)) + (get_local $a) + (get_local $input) + (i32.add) + ) +) From dbebdf937f609d4cacc780e90786479eb0c4dad8 Mon Sep 17 00:00:00 2001 From: losfair Date: Sun, 24 Feb 2019 12:00:35 +0800 Subject: [PATCH 025/100] Code generation for br_table. 
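The br_table lowering in this commit clamps the selector before indexing the jump table: the guard "cmp eax, <number of targets>; jae default_br" sends every out-of-range index to the default target. A minimal Rust model of that selection rule, not of the emitted assembly; the helper name is illustrative, and the target/default values mirror the (br_table 2 0 1 3) test added two commits below:

    fn br_table_target(targets: &[u32], default: u32, idx: u32) -> u32 {
        // Out-of-range indices fall through to the default target,
        // which is what the cmp/jae guard enforces in the generated code.
        *targets.get(idx as usize).unwrap_or(&default)
    }

    fn main() {
        let (targets, default) = ([2, 0, 1], 3);
        assert_eq!(br_table_target(&targets, default, 0), 2);
        assert_eq!(br_table_target(&targets, default, 2), 1);
        assert_eq!(br_table_target(&targets, default, 3), 3); // first out-of-range index
        assert_eq!(br_table_target(&targets, default, 100), 3);
    }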
--- lib/dynasm-backend/src/codegen_x64.rs | 93 ++++++++++++++++++++++++++- 1 file changed, 90 insertions(+), 3 deletions(-) diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index 6949dceb77f..bbd3d4f3ac1 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -122,6 +122,7 @@ pub struct X64FunctionCode { begin_offset: AssemblyOffset, assembler: Option, function_labels: Option>, + br_table_data: Option>>, returns: Vec, locals: Vec, num_params: usize, @@ -134,6 +135,7 @@ pub struct X64FunctionCode { pub struct X64ExecutionContext { code: ExecutableBuffer, functions: Vec, + br_table_data: Vec>, } impl ProtectedCaller for X64ExecutionContext { @@ -227,10 +229,11 @@ impl X64ModuleCodeGenerator { impl ModuleCodeGenerator for X64ModuleCodeGenerator { fn next_function(&mut self) -> Result<&mut X64FunctionCode, CodegenError> { - let (mut assembler, mut function_labels) = match self.functions.last_mut() { + let (mut assembler, mut function_labels, br_table_data) = match self.functions.last_mut() { Some(x) => ( x.assembler.take().unwrap(), x.function_labels.take().unwrap(), + x.br_table_data.take().unwrap(), ), None => ( match Assembler::new() { @@ -242,6 +245,7 @@ impl ModuleCodeGenerator for X64ModuleCode } }, HashMap::new(), + vec![], ), }; let begin_label = *function_labels @@ -262,6 +266,7 @@ impl ModuleCodeGenerator for X64ModuleCode begin_offset: begin_offset, assembler: Some(assembler), function_labels: Some(function_labels), + br_table_data: Some(br_table_data), returns: vec![], locals: vec![], num_params: 0, @@ -275,8 +280,8 @@ impl ModuleCodeGenerator for X64ModuleCode } fn finalize(mut self) -> Result { - let mut assembler = match self.functions.last_mut() { - Some(x) => x.assembler.take().unwrap(), + let (mut assembler, mut br_table_data) = match self.functions.last_mut() { + Some(x) => (x.assembler.take().unwrap(), x.br_table_data.take().unwrap()), None => { return Err(CodegenError { message: "no function", @@ -284,9 +289,16 @@ impl ModuleCodeGenerator for X64ModuleCode } }; let output = assembler.finalize().unwrap(); + + for table in &mut br_table_data { + for entry in table { + *entry = output.ptr(AssemblyOffset(*entry)) as usize; + } + } Ok(X64ExecutionContext { code: output, functions: self.functions, + br_table_data: br_table_data, }) } @@ -1201,6 +1213,30 @@ impl FunctionCodeGenerator for X64FunctionCode { }, )?; } + Operator::I32And => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + dynasm!( + assembler + ; and Rd(left as u8), Rd(right as u8) + ); + }, + )?; + } + Operator::I32Or => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + dynasm!( + assembler + ; or Rd(left as u8), Rd(right as u8) + ); + }, + )?; + } Operator::I32Eq => { Self::emit_binop_i32( assembler, @@ -1488,6 +1524,57 @@ impl FunctionCodeGenerator for X64FunctionCode { ; =>no_br_label ); } + Operator::BrTable { table } => { + let (targets, default_target) = match table.read_table() { + Ok(x) => x, + Err(_) => { + return Err(CodegenError { + message: "cannot read br table", + }); + } + }; + let cond_ty = Self::emit_pop_into_ax(assembler, &mut self.value_stack)?; + if cond_ty != WpType::I32 { + return Err(CodegenError { + message: "expecting i32 for BrTable condition", + }); + } + let mut table = vec![0usize; targets.len()]; + dynasm!( + assembler + ; cmp eax, targets.len() as i32 + ; jae >default_br + ; shl rax, 3 + ; 
push rcx + ; mov rcx, QWORD table.as_ptr() as usize as i64 + ; add rax, rcx + ; pop rcx + ; mov rax, [rax] // assuming upper 32 bits of rax are zeroed + ; jmp rax + ); + for (i, target) in targets.iter().enumerate() { + let AssemblyOffset(offset) = assembler.offset(); + table[i] = offset; + Self::emit_jmp( + assembler, + self.control_stack.as_ref().unwrap(), + &mut self.value_stack, + *target as usize, + )?; // This does not actually modify value_stack. + } + dynasm!( + assembler + ; default_br: + ); + Self::emit_jmp( + assembler, + self.control_stack.as_ref().unwrap(), + &mut self.value_stack, + default_target as usize, + )?; + self.br_table_data.as_mut().unwrap().push(table); + self.unreachable_depth = 1; + } _ => unimplemented!(), } Ok(()) From 2432a6c9b33659fe3cefdcd9e6956e900b224b8a Mon Sep 17 00:00:00 2001 From: losfair Date: Mon, 25 Feb 2019 22:47:27 +0800 Subject: [PATCH 026/100] Fix function calls. --- lib/dynasm-backend/src/codegen_x64.rs | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index bbd3d4f3ac1..ae0664c31f4 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -876,13 +876,13 @@ impl X64FunctionCode { if is_dword(size) { dynasm!( assembler - ; mov eax, [rsp + (total_size + 16 + caller_stack_offset) as i32] + ; mov eax, [rsp + (total_size + 16 + saved_regs.len() * 8 + caller_stack_offset) as i32] ; mov [rsp + offset as i32], eax ); } else { dynasm!( assembler - ; mov rax, [rsp + (total_size + 16 + caller_stack_offset) as i32] + ; mov rax, [rsp + (total_size + 16 + saved_regs.len() * 8 + caller_stack_offset) as i32] ; mov [rsp + offset as i32], rax ); } @@ -910,6 +910,14 @@ impl X64FunctionCode { ; jmp =>target ; after_call: ); + + for reg in saved_regs.iter().rev() { + dynasm!( + assembler + ; pop Rq(*reg as u8) + ); + } + if caller_stack_offset != 0 { dynasm!( assembler @@ -929,13 +937,6 @@ impl X64FunctionCode { } } - for reg in saved_regs.iter().rev() { - dynasm!( - assembler - ; pop Rq(*reg as u8) - ); - } - Ok(()) } } From da1a3fadb164b94578620d6b4f10e97e609a8259 Mon Sep 17 00:00:00 2001 From: losfair Date: Mon, 25 Feb 2019 22:47:41 +0800 Subject: [PATCH 027/100] Add test for br_table. 
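The "Fix function calls" change just above adjusts where caller values that still live on the machine stack are read from during a call: they sit at rsp plus the callee's packed parameter area (total_size), the pushed return address and saved rbp (16 bytes), and the scratch registers pushed before the call (8 bytes each). A worked sketch of that offset arithmetic; the function name and the concrete numbers are illustrative only:

    fn caller_value_offset(total_size: usize, saved_regs: usize, caller_stack_offset: usize) -> usize {
        // Mirrors the operand offset used in emit_call_raw after the fix:
        // rsp + total_size + 16 + saved_regs * 8 + caller_stack_offset
        total_size + 16 + saved_regs * 8 + caller_stack_offset
    }

    fn main() {
        // 16 bytes of packed parameters, three saved scratch registers, first spilled caller value:
        assert_eq!(caller_value_offset(16, 3, 0), 56);
        // The next spilled caller value is one 8-byte slot further away:
        assert_eq!(caller_value_offset(16, 3, 8), 64);
    }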
--- examples/single_pass_tests/br_table.wat | 37 +++++++++++++++++++++++++ 1 file changed, 37 insertions(+) create mode 100644 examples/single_pass_tests/br_table.wat diff --git a/examples/single_pass_tests/br_table.wat b/examples/single_pass_tests/br_table.wat new file mode 100644 index 00000000000..72e8f92fe6d --- /dev/null +++ b/examples/single_pass_tests/br_table.wat @@ -0,0 +1,37 @@ +(module + (func $main (export "main") + (i32.eq (call $test (i32.const 0)) (i32.const 2)) + (i32.eq (call $test (i32.const 1)) (i32.const 0)) + (i32.eq (call $test (i32.const 2)) (i32.const 1)) + (i32.eq (call $test (i32.const 3)) (i32.const 3)) + (i32.eq (call $test (i32.const 4)) (i32.const 3)) + (i32.and) + (i32.and) + (i32.and) + (i32.and) + (i32.const 1) + (i32.eq) + (br_if 0) + (unreachable) + ) + + (func $test (param $p i32) (result i32) + (block + (block + (block + (block + (block + (get_local $p) + (br_table 2 0 1 3) + ) + (return (i32.const 0)) + ) + (return (i32.const 1)) + ) + (return (i32.const 2)) + ) + (return (i32.const 3)) + ) + (unreachable) + ) +) From 80812e3809c07f15369afbc1dbe54e055d35ee74 Mon Sep 17 00:00:00 2001 From: losfair Date: Mon, 25 Feb 2019 23:29:18 +0800 Subject: [PATCH 028/100] Implement native call & fix stack alignment. --- lib/dynasm-backend/src/codegen_x64.rs | 246 ++++++++++++++------------ 1 file changed, 132 insertions(+), 114 deletions(-) diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index ae0664c31f4..478051979cc 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -36,9 +36,9 @@ lazy_static! { ; je >_loop_end ; mov eax, [rdi] ; mov [r8], eax - ; add r8, 4 - ; add rdi, 4 - ; sub rsi, 4 + ; add r8, 8 + ; add rdi, 8 + ; sub rsi, 8 ; jmp <_loop ; _loop_end: ; jmp rdx @@ -327,7 +327,7 @@ impl X64FunctionCode { let size = get_size_of_type(&info.ty)?; dynasm!( assembler - ; add rsp, size as i32 + ; add rsp, 8 ); } } @@ -396,8 +396,7 @@ impl X64FunctionCode { } else if a.location.is_register() { dynasm!( assembler - ; mov eax, [rsp] - ; add rsp, 4 + ; pop rax ); f( assembler, @@ -411,15 +410,15 @@ impl X64FunctionCode { dynasm!( assembler ; push rcx - ; mov ecx, [rsp + 12] + ; mov ecx, [rsp + 16] ; mov eax, [rsp + 8] ); f(assembler, value_stack, Register::RCX, Register::RAX); dynasm!( assembler - ; mov [rsp + 12], ecx + ; mov [rsp + 16], ecx ; pop rcx - ; add rsp, 4 + ; add rsp, 8 ); } @@ -541,17 +540,10 @@ impl X64FunctionCode { ); } ValueLocation::Stack => { - if is_dword(get_size_of_type(&val.ty)?) { - dynasm!( - assembler - ; mov eax, [rsp] - ); - } else { - dynasm!( - assembler - ; mov rax, [rsp] - ); - } + dynasm!( + assembler + ; mov rax, [rsp] + ); } } @@ -572,19 +564,10 @@ impl X64FunctionCode { ); } ValueLocation::Stack => { - if is_dword(get_size_of_type(&val.ty)?) { - dynasm!( - assembler - ; mov eax, [rsp] - ; add rsp, 4 - ); - } else { - dynasm!( - assembler - ; mov rax, [rsp] - ; add rsp, 8 - ); - } + dynasm!( + assembler + ; pop rax + ); } } @@ -606,19 +589,10 @@ impl X64FunctionCode { ); } ValueLocation::Stack => { - if is_dword(get_size_of_type(&ty)?) { - dynasm!( - assembler - ; sub rsp, 4 - ; mov [rsp], eax - ); - } else { - dynasm!( - assembler - ; sub rsp, 8 - ; mov [rsp], rax - ); - } + dynasm!( + assembler + ; push rax + ); } } @@ -708,19 +682,10 @@ impl X64FunctionCode { ); } ValueLocation::Stack => { - if is_dword(get_size_of_type(&frame.returns[0])?) 
{ - dynasm!( - assembler - ; sub rsp, 4 - ; mov [rsp], eax - ); - } else { - dynasm!( - assembler - ; sub rsp, 8 - ; mov [rsp], rax - ); - } + dynasm!( + assembler + ; push rax + ); } } } @@ -751,7 +716,7 @@ impl X64FunctionCode { for i in 0..value_stack.values.len() - frame.value_stack_depth_before { let vi = value_stack.values[value_stack.values.len() - 1 - i]; if vi.location == ValueLocation::Stack { - sp_diff += get_size_of_type(&vi.ty)?; + sp_diff += 8 } else { break; } @@ -798,6 +763,67 @@ impl X64FunctionCode { Ok(()) } + fn emit_native_call_trampoline( + assembler: &mut Assembler, + value_stack: &mut ValueStack, + target: usize, + ctx1: usize, + ctx2: usize, + ) -> Result { + let label = assembler.new_dynamic_label(); + + dynasm!( + assembler + ; =>label + ); + + let mut saved_regs: Vec = Vec::new(); + + for v in &value_stack.values { + match v.location { + ValueLocation::Register(x) => { + let reg = Register::from_scratch_reg(x); + dynasm!( + assembler + ; push Rq(reg as u8) + ); + saved_regs.push(reg); + } + ValueLocation::Stack => break, + } + } + + dynasm!( + assembler + ; mov rdi, QWORD (ctx1 as i64) + ; mov rsi, QWORD (ctx2 as i64) + ; lea rdx, [rsp + (saved_regs.len() * 8) as i32] + ; mov rcx, rbp + ; push rbp + ; mov rbp, rsp + ; mov rax, QWORD (0xfffffffffffffff0u64 as i64) + ; and rsp, rax + ; mov rax, QWORD (target as i64) + ; call rax + ; mov rsp, rbp + ; pop rbp + ); + + for reg in saved_regs.iter().rev() { + dynasm!( + assembler + ; pop Rq(*reg as u8) + ); + } + + dynasm!( + assembler + ; ret + ); + + Ok(label) + } + fn emit_call_raw( assembler: &mut Assembler, value_stack: &mut ValueStack, @@ -805,10 +831,7 @@ impl X64FunctionCode { params: &[WpType], returns: &[WpType], ) -> Result<(), CodegenError> { - let mut total_size: usize = 0; - for ty in params { - total_size += get_size_of_type(ty)?; - } + let total_size: usize = params.len() * 8; if params.len() > value_stack.values.len() { return Err(CodegenError { @@ -855,42 +878,26 @@ impl X64FunctionCode { message: "value type mismatch", }); } - let size = get_size_of_type(ty)?; match val.location { ValueLocation::Register(x) => { let reg = Register::from_scratch_reg(x); - if is_dword(size) { - dynasm!( - assembler - ; mov [rsp + offset as i32], Rd(reg as u8) - ); - } else { - dynasm!( - assembler - ; mov [rsp + offset as i32], Rq(reg as u8) - ); - } + dynasm!( + assembler + ; mov [rsp + offset as i32], Rq(reg as u8) + ); } ValueLocation::Stack => { - if is_dword(size) { - dynasm!( - assembler - ; mov eax, [rsp + (total_size + 16 + saved_regs.len() * 8 + caller_stack_offset) as i32] - ; mov [rsp + offset as i32], eax - ); - } else { - dynasm!( - assembler - ; mov rax, [rsp + (total_size + 16 + saved_regs.len() * 8 + caller_stack_offset) as i32] - ; mov [rsp + offset as i32], rax - ); - } - caller_stack_offset += size; + dynasm!( + assembler + ; mov rax, [rsp + (total_size + 16 + saved_regs.len() * 8 + caller_stack_offset) as i32] + ; mov [rsp + offset as i32], rax + ); + caller_stack_offset += 8; } } - offset += size; + offset += 8; } assert_eq!(offset, total_size); @@ -954,9 +961,8 @@ impl FunctionCodeGenerator for X64FunctionCode { /// - Locals in reversed order, callee initialized fn feed_param(&mut self, ty: WpType) -> Result<(), CodegenError> { let assembler = self.assembler.as_mut().unwrap(); - let size = get_size_of_type(&ty)?; - self.current_stack_offset += size; + self.current_stack_offset += 8; self.locals.push(Local { ty: ty, stack_offset: self.current_stack_offset, @@ -970,25 +976,40 @@ impl 
FunctionCodeGenerator for X64FunctionCode { fn feed_local(&mut self, ty: WpType, n: usize) -> Result<(), CodegenError> { let assembler = self.assembler.as_mut().unwrap(); let size = get_size_of_type(&ty)?; - for _ in 0..n { - // FIXME: check range of n - self.current_stack_offset += size; - self.locals.push(Local { - ty: ty, - stack_offset: self.current_stack_offset, - }); - match size { - 4 => dynasm!( + + if is_dword(size) { + for _ in 0..n { + // FIXME: check range of n + self.current_stack_offset += 4; + self.locals.push(Local { + ty: ty, + stack_offset: self.current_stack_offset, + }); + dynasm!( assembler ; sub rsp, 4 ; mov DWORD [rsp], 0 - ), - 8 => dynasm!( + ); + } + if n % 2 == 1 { + self.current_stack_offset += 4; + dynasm!( assembler - ; sub rsp, 8 - ; mov QWORD [rsp], 0 - ), - _ => unreachable!(), + ; sub rsp, 4 + ); + } + } else { + for _ in 0..n { + // FIXME: check range of n + self.current_stack_offset += 8; + self.locals.push(Local { + ty: ty, + stack_offset: self.current_stack_offset, + }); + dynasm!( + assembler + ; push 0 + ); } } Ok(()) @@ -1054,15 +1075,13 @@ impl FunctionCodeGenerator for X64FunctionCode { dynasm!( assembler ; mov eax, [rbp - (local.stack_offset as i32)] - ; sub rsp, 4 - ; mov [rsp], eax + ; push rax ); } else { dynasm!( assembler ; mov rax, [rbp - (local.stack_offset as i32)] - ; sub rsp, 8 - ; mov [rsp], rax + ; push rax ); } } @@ -1108,8 +1127,7 @@ impl FunctionCodeGenerator for X64FunctionCode { ValueLocation::Stack => { dynasm!( assembler - ; sub rsp, 4 - ; mov DWORD [rsp], value + ; push value ); } } From 78fd995ad380e8af1cbe4f9c6d5639c9820b1798 Mon Sep 17 00:00:00 2001 From: losfair Date: Mon, 25 Feb 2019 23:38:33 +0800 Subject: [PATCH 029/100] Fix argument passing at entry. --- lib/dynasm-backend/src/codegen_x64.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index 478051979cc..efbdb144f45 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -159,10 +159,10 @@ impl ProtectedCaller for X64ExecutionContext { } let f = &self.functions[index]; - let mut total_size: usize = 0; + let total_size = f.num_params * 8; - for local in &f.locals[0..f.num_params] { - total_size += get_size_of_type(&local.ty).unwrap(); + if f.num_params > 0 && f.locals[f.num_params - 1].stack_offset != total_size { + panic!("internal error: inconsistent stack layout"); } let mut param_buf: Vec = vec![0; total_size]; From b18595f2dc49eba6bd28088f422829764ab7b663 Mon Sep 17 00:00:00 2001 From: losfair Date: Mon, 25 Feb 2019 23:39:12 +0800 Subject: [PATCH 030/100] Pass command-line arguments to wasm as i32. 
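The change below passes each command-line argument to the Wasm main function as an i32 via x.parse().unwrap(), so a non-numeric argument panics the CLI. A minimal sketch of the same conversion written fallibly instead; the function name is illustrative and not part of the codebase:

    fn parse_wasm_args(args: &[&str]) -> Result<Vec<i32>, std::num::ParseIntError> {
        // Collecting an iterator of Result<i32, _> into Result<Vec<i32>, _>
        // stops at the first argument that fails to parse.
        args.iter().map(|s| s.parse::<i32>()).collect()
    }

    fn main() {
        assert_eq!(parse_wasm_args(&["1", "-5"]).unwrap(), vec![1, -5]);
        assert!(parse_wasm_args(&["not-a-number"]).is_err());
    }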
--- src/webassembly.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/webassembly.rs b/src/webassembly.rs index bbe50b4d62d..5a603893b56 100644 --- a/src/webassembly.rs +++ b/src/webassembly.rs @@ -4,6 +4,7 @@ use wasmer_runtime::{ error::{CallResult, Result}, ImportObject, Instance, Module, }; +use wasmer_runtime_core::types::Value; use wasmer_emscripten::{is_emscripten_module, run_emscripten_instance}; @@ -86,7 +87,8 @@ pub fn run_instance( if is_emscripten_module(module) { run_emscripten_instance(module, instance, path, args)?; } else { - instance.call("main", &[])?; + let args: Vec = args.into_iter().map(|x| Value::I32(x.parse().unwrap())).collect(); + println!("{:?}", instance.call("main", &args)?); }; Ok(()) From 530294922ab2a6f7bde5f1dd434358c2f4e40be2 Mon Sep 17 00:00:00 2001 From: losfair Date: Mon, 25 Feb 2019 23:55:27 +0800 Subject: [PATCH 031/100] Allow more registers to be used. --- lib/dynasm-backend/src/codegen_x64.rs | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index efbdb144f45..cc65bf7ecee 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -24,6 +24,11 @@ lazy_static! { let offset = assembler.offset(); dynasm!( assembler + ; push rbx + ; push r12 + ; push r13 + ; push r14 + ; push r15 ; lea rax, [>after_call] ; push rax ; push rbp @@ -43,6 +48,11 @@ lazy_static! { ; _loop_end: ; jmp rdx ; after_call: + ; pop r15 + ; pop r14 + ; pop r13 + ; pop r12 + ; pop rbx ; ret ); let buf = assembler.finalize().unwrap(); @@ -85,6 +95,11 @@ impl Register { 5 => R9, 6 => R10, 7 => R11, + 8 => RBX, + 9 => R12, + 10 => R13, + 11 => R14, + 12 => R15, _ => unreachable!(), } } From 9d8c5a5c7043e7b88ae2502e0725f72db1f861c5 Mon Sep 17 00:00:00 2001 From: losfair Date: Mon, 25 Feb 2019 23:57:12 +0800 Subject: [PATCH 032/100] Add a note on incorrect code generation. --- lib/dynasm-backend/src/codegen_x64.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index cc65bf7ecee..69d12054e96 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -286,7 +286,7 @@ impl ModuleCodeGenerator for X64ModuleCode locals: vec![], num_params: 0, current_stack_offset: 0, - value_stack: ValueStack::new(4), + value_stack: ValueStack::new(4), // FIXME: Use of R8 and above registers generates incorrect assembly. control_stack: None, unreachable_depth: 0, }; From 3c3c5db2e120d31987e8a39c81da2b95eeb4e13e Mon Sep 17 00:00:00 2001 From: losfair Date: Tue, 26 Feb 2019 20:56:10 +0800 Subject: [PATCH 033/100] Native trampolines. 
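The native-trampoline commit below passes two context values to a Rust handler by embedding them as 64-bit immediates in the generated code; TrapCode is declared repr(u64) so it survives that round trip, and the trampoline asserts at run time (per its FIXME) that both context types are pointer-sized. A standalone sketch of the enum-through-u64 idea, using a plain as cast instead of the transmute_copy the patch uses; the enum here only mirrors the one added in the patch:

    #[repr(u64)]
    #[derive(Copy, Clone, Debug, PartialEq)]
    enum TrapCode {
        Unreachable = 0,
    }

    fn main() {
        // The discriminant is what actually travels through the generated code.
        let raw = TrapCode::Unreachable as u64;
        assert_eq!(std::mem::size_of::<TrapCode>(), std::mem::size_of::<u64>());
        let recovered = match raw {
            0 => TrapCode::Unreachable,
            other => panic!("unknown trap code {}", other),
        };
        assert_eq!(recovered, TrapCode::Unreachable);
    }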
--- lib/dynasm-backend/src/codegen_x64.rs | 115 +++++++++++++++----------- 1 file changed, 65 insertions(+), 50 deletions(-) diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index 69d12054e96..3eb85f94a79 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -121,16 +121,28 @@ impl Register { } } -#[derive(Default)] +#[repr(u64)] +#[derive(Copy, Clone, Debug)] +pub enum TrapCode { + Unreachable, +} + +pub struct NativeTrampolines { + trap_unreachable: DynamicLabel, +} + pub struct X64ModuleCodeGenerator { functions: Vec, signatures: Option>>>, function_signatures: Option>>, + assembler: Option, + native_trampolines: Arc, } pub struct X64FunctionCode { signatures: Arc>>, function_signatures: Arc>, + native_trampolines: Arc, id: usize, begin_label: DynamicLabel, @@ -238,7 +250,23 @@ struct Local { impl X64ModuleCodeGenerator { pub fn new() -> X64ModuleCodeGenerator { - X64ModuleCodeGenerator::default() + let mut assembler = Assembler::new().unwrap(); + let nt = NativeTrampolines { + trap_unreachable: X64FunctionCode::emit_native_call_trampoline( + &mut assembler, + do_trap, + 0usize, + TrapCode::Unreachable, + ), + }; + + X64ModuleCodeGenerator { + functions: vec![], + signatures: None, + function_signatures: None, + assembler: Some(assembler), + native_trampolines: Arc::new(nt), + } } } @@ -250,18 +278,7 @@ impl ModuleCodeGenerator for X64ModuleCode x.function_labels.take().unwrap(), x.br_table_data.take().unwrap(), ), - None => ( - match Assembler::new() { - Ok(x) => x, - Err(_) => { - return Err(CodegenError { - message: "cannot initialize assembler", - }) - } - }, - HashMap::new(), - vec![], - ), + None => (self.assembler.take().unwrap(), HashMap::new(), vec![]), }; let begin_label = *function_labels .entry(self.functions.len()) @@ -275,6 +292,7 @@ impl ModuleCodeGenerator for X64ModuleCode let code = X64FunctionCode { signatures: self.signatures.as_ref().unwrap().clone(), function_signatures: self.function_signatures.as_ref().unwrap().clone(), + native_trampolines: self.native_trampolines.clone(), id: self.functions.len(), begin_label: begin_label, @@ -778,13 +796,17 @@ impl X64FunctionCode { Ok(()) } - fn emit_native_call_trampoline( + fn emit_native_call_trampoline( assembler: &mut Assembler, - value_stack: &mut ValueStack, - target: usize, - ctx1: usize, - ctx2: usize, - ) -> Result { + target: unsafe extern "C" fn( + ctx1: A, + ctx2: B, + stack_top: *mut u8, + stack_base: *mut u8, + ) -> u64, + ctx1: A, + ctx2: B, + ) -> DynamicLabel { let label = assembler.new_dynamic_label(); dynasm!( @@ -792,27 +814,15 @@ impl X64FunctionCode { ; =>label ); - let mut saved_regs: Vec = Vec::new(); - - for v in &value_stack.values { - match v.location { - ValueLocation::Register(x) => { - let reg = Register::from_scratch_reg(x); - dynasm!( - assembler - ; push Rq(reg as u8) - ); - saved_regs.push(reg); - } - ValueLocation::Stack => break, - } - } + // FIXME: Check at compile time. 
+ assert_eq!(::std::mem::size_of::(), ::std::mem::size_of::()); + assert_eq!(::std::mem::size_of::(), ::std::mem::size_of::()); dynasm!( assembler - ; mov rdi, QWORD (ctx1 as i64) - ; mov rsi, QWORD (ctx2 as i64) - ; lea rdx, [rsp + (saved_regs.len() * 8) as i32] + ; mov rdi, QWORD (unsafe { ::std::mem::transmute_copy::(&ctx1) }) + ; mov rsi, QWORD (unsafe { ::std::mem::transmute_copy::(&ctx2) }) + ; mov rdx, rsp ; mov rcx, rbp ; push rbp ; mov rbp, rsp @@ -824,19 +834,12 @@ impl X64FunctionCode { ; pop rbp ); - for reg in saved_regs.iter().rev() { - dynasm!( - assembler - ; pop Rq(*reg as u8) - ); - } - dynasm!( assembler ; ret ); - Ok(label) + label } fn emit_call_raw( @@ -1439,10 +1442,13 @@ impl FunctionCodeGenerator for X64FunctionCode { }); } Operator::Unreachable => { - dynasm!( - assembler - ; ud2 - ); + Self::emit_call_raw( + assembler, + &mut self.value_stack, + self.native_trampolines.trap_unreachable, + &[], + &[], + )?; self.unreachable_depth = 1; } Operator::Drop => { @@ -1667,3 +1673,12 @@ fn type_to_wp_type(ty: Type) -> WpType { Type::F64 => WpType::F64, } } + +unsafe extern "C" fn do_trap( + ctx1: usize, + ctx2: TrapCode, + stack_top: *mut u8, + stack_base: *mut u8, +) -> u64 { + panic!("TRAP CODE: {:?}", ctx2); +} From d50f1cc95f03abac2b93622e3e239a498c436bcc Mon Sep 17 00:00:00 2001 From: losfair Date: Wed, 27 Feb 2019 23:38:45 +0800 Subject: [PATCH 034/100] If/Else. --- lib/dynasm-backend/src/codegen_x64.rs | 109 ++++++++++++++++++++++++-- lib/dynasm-backend/src/stack.rs | 9 +++ 2 files changed, 113 insertions(+), 5 deletions(-) diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index 3eb85f94a79..cdcb9fff6bd 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -1,5 +1,5 @@ use super::codegen::*; -use super::stack::{ControlFrame, ControlStack, ValueInfo, ValueLocation, ValueStack}; +use super::stack::{ControlFrame, ControlStack, IfElseState, ValueInfo, ValueLocation, ValueStack}; use byteorder::{ByteOrder, LittleEndian}; use dynasmrt::{ x64::Assembler, AssemblyOffset, DynamicLabel, DynasmApi, DynasmLabelApi, ExecutableBuffer, @@ -670,6 +670,52 @@ impl X64FunctionCode { Ok(()) } + fn emit_else( + assembler: &mut Assembler, + control_stack: &mut ControlStack, + value_stack: &mut ValueStack, + was_unreachable: bool, + ) -> Result<(), CodegenError> { + let frame = match control_stack.frames.last_mut() { + Some(x) => x, + None => { + return Err(CodegenError { + message: "no frame", + }) + } + }; + + if !was_unreachable { + Self::emit_leave_frame(assembler, frame, value_stack, false)?; + if value_stack.values.len() != frame.value_stack_depth_before { + return Err(CodegenError { + message: "value_stack.values.len() != frame.value_stack_depth_before", + }); + } + } else { + // No need to actually unwind the stack here. 
+ value_stack.reset_depth(frame.value_stack_depth_before); + } + + match frame.if_else { + IfElseState::If(label) => { + dynasm!( + assembler + ; jmp =>frame.label + ; => label + ); + frame.if_else = IfElseState::Else; + } + _ => { + return Err(CodegenError { + message: "unexpected if else state", + }) + } + } + + Ok(()) + } + fn emit_block_end( assembler: &mut Assembler, control_stack: &mut ControlStack, @@ -698,10 +744,27 @@ impl X64FunctionCode { } if !frame.loop_like { - dynasm!( - assembler - ; => frame.label - ); + match frame.if_else { + IfElseState::None | IfElseState::Else => { + dynasm!( + assembler + ; => frame.label + ); + } + IfElseState::If(label) => { + dynasm!( + assembler + ; => frame.label + ; => label + ); + + if frame.returns.len() != 0 { + return Err(CodegenError { + message: "if without else, with non-empty returns", + }); + } + } + } } if frame.returns.len() == 1 { @@ -1434,6 +1497,7 @@ impl FunctionCodeGenerator for X64FunctionCode { .push(ControlFrame { label: assembler.new_dynamic_label(), loop_like: false, + if_else: IfElseState::None, returns: match ty { WpType::EmptyBlockType => vec![], _ => vec![ty], @@ -1525,6 +1589,7 @@ impl FunctionCodeGenerator for X64FunctionCode { .push(ControlFrame { label: label, loop_like: true, + if_else: IfElseState::None, returns: match ty { WpType::EmptyBlockType => vec![], _ => vec![ty], @@ -1536,6 +1601,40 @@ impl FunctionCodeGenerator for X64FunctionCode { ; =>label ); } + Operator::If { ty } => { + let label_end = assembler.new_dynamic_label(); + let label_else = assembler.new_dynamic_label(); + + Self::emit_pop_into_ax(assembler, &mut self.value_stack)?; // TODO: typeck? + + self.control_stack + .as_mut() + .unwrap() + .frames + .push(ControlFrame { + label: label_end, + loop_like: false, + if_else: IfElseState::If(label_else), + returns: match ty { + WpType::EmptyBlockType => vec![], + _ => vec![ty], + }, + value_stack_depth_before: self.value_stack.values.len(), + }); + dynasm!( + assembler + ; cmp eax, 0 + ; je =>label_else + ); + } + Operator::Else => { + Self::emit_else( + assembler, + self.control_stack.as_mut().unwrap(), + &mut self.value_stack, + was_unreachable, + )?; + } Operator::Br { relative_depth } => { Self::emit_jmp( assembler, diff --git a/lib/dynasm-backend/src/stack.rs b/lib/dynasm-backend/src/stack.rs index 8426b708dba..8e45c6d3827 100644 --- a/lib/dynasm-backend/src/stack.rs +++ b/lib/dynasm-backend/src/stack.rs @@ -21,10 +21,18 @@ pub enum RegisterName { Invalid, }*/ +#[derive(Debug)] +pub enum IfElseState { + None, + If(DynamicLabel), + Else, +} + #[derive(Debug)] pub struct ControlFrame { pub label: DynamicLabel, pub loop_like: bool, + pub if_else: IfElseState, pub returns: Vec, pub value_stack_depth_before: usize, } @@ -147,6 +155,7 @@ impl ControlStack { frames: vec![ControlFrame { label: label, loop_like: false, + if_else: IfElseState::None, returns: returns, value_stack_depth_before: 0, }], From ec9a8f0ebd09b5f730624630a6cc4e21d4fb3ebd Mon Sep 17 00:00:00 2001 From: losfair Date: Wed, 27 Feb 2019 23:41:04 +0800 Subject: [PATCH 035/100] Add test for if/else. 
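The if/else lowering added in the previous commit jumps to the else label when the popped condition compares equal to zero, and the frame's End resolves both labels. The if_else.wat test added below boils down to the following control flow; this is only a Rust rendering of what the test checks, with foo mirroring the $foo helper in the test:

    fn foo(input: i32) -> i32 {
        // Mirrors $foo in if_else.wat: a constant local plus the argument.
        let a = 42;
        a + input
    }

    fn main() {
        let a = 33;
        // First block: the condition is true, so the then arm feeds 1 into foo.
        assert_eq!(foo(if a == 33 { 1 } else { 2 }), 43);
        // Second block: the condition is false, so the else arm feeds 2 into foo.
        assert_eq!(foo(if a == 30 { 1 } else { 2 }), 44);
    }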
--- examples/single_pass_tests/if_else.wat | 33 ++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) create mode 100644 examples/single_pass_tests/if_else.wat diff --git a/examples/single_pass_tests/if_else.wat b/examples/single_pass_tests/if_else.wat new file mode 100644 index 00000000000..533b8f13df7 --- /dev/null +++ b/examples/single_pass_tests/if_else.wat @@ -0,0 +1,33 @@ +(module + (func $main (export "main") + (local $a i32) + (set_local $a (i32.const 33)) + + (block + (call $foo (if (result i32) (i32.eq (get_local $a) (i32.const 33)) + (then (i32.const 1)) + (else (i32.const 2)) + )) + (i32.eq (i32.const 43)) + (br_if 0) + (unreachable) + ) + (block + (call $foo (if (result i32) (i32.eq (get_local $a) (i32.const 30)) + (then (i32.const 1)) + (else (i32.const 2)) + )) + (i32.eq (i32.const 44)) + (br_if 0) + (unreachable) + ) + ) + + (func $foo (param $input i32) (result i32) + (local $a i32) + (set_local $a (i32.const 42)) + (get_local $a) + (get_local $input) + (i32.add) + ) +) From b7ca5e46edbe5a8aebc895b6e6db792ea9562658 Mon Sep 17 00:00:00 2001 From: losfair Date: Thu, 28 Feb 2019 23:12:42 +0800 Subject: [PATCH 036/100] Add Select opcode. --- lib/dynasm-backend/src/codegen_x64.rs | 50 +++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index cdcb9fff6bd..6742b64f090 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -1635,6 +1635,56 @@ impl FunctionCodeGenerator for X64FunctionCode { was_unreachable, )?; } + Operator::Select => { + Self::emit_pop_into_ax(assembler, &mut self.value_stack)?; + let v_b = self.value_stack.pop()?; + let v_a = self.value_stack.pop()?; + + if v_b.ty != v_a.ty { + return Err(CodegenError { + message: "select: type mismatch", + }); + } + + dynasm!( + assembler + ; cmp eax, 0 + ); + match v_b.location { + ValueLocation::Stack => { + dynasm!( + assembler + ; cmove rax, [rsp] + ; add rsp, 8 + ); + } + ValueLocation::Register(x) => { + let reg = Register::from_scratch_reg(x); + dynasm!( + assembler + ; cmove rax, Rq(reg as u8) + ); + } + } + match v_a.location { + ValueLocation::Stack => { + dynasm!( + assembler + ; cmovne rax, [rsp] + ; add rsp, 8 + ); + } + ValueLocation::Register(x) => { + let reg = Register::from_scratch_reg(x); + dynasm!( + assembler + ; cmovne rax, Rq(reg as u8) + ); + } + } + + Self::emit_push_from_ax(assembler, &mut self.value_stack, v_a.ty)?; + } Operator::Br { relative_depth } => { Self::emit_jmp( assembler, From adb309fbd8820e705510304ba4e161603405afe9 Mon Sep 17 00:00:00 2001 From: losfair Date: Thu, 28 Feb 2019 23:12:50 +0800 Subject: [PATCH 037/100] Add select test. 
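The Select lowering in the previous commit pops the condition into eax and then uses cmove/cmovne, so a zero condition keeps the second operand and a non-zero condition keeps the first, with no branch. The select.wat test below checks exactly that rule; a minimal Rust statement of the semantics, with an illustrative helper name:

    fn wasm_select(a: i32, b: i32, cond: i32) -> i32 {
        // select yields a when cond is non-zero and b when it is zero.
        if cond != 0 { a } else { b }
    }

    fn main() {
        assert_eq!(wasm_select(10, 20, 1), 10);
        assert_eq!(wasm_select(10, 20, 0), 20);
    }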
--- examples/single_pass_tests/select.wat | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) create mode 100644 examples/single_pass_tests/select.wat diff --git a/examples/single_pass_tests/select.wat b/examples/single_pass_tests/select.wat new file mode 100644 index 00000000000..cfdd368486e --- /dev/null +++ b/examples/single_pass_tests/select.wat @@ -0,0 +1,20 @@ +(module + (func $main (export "main") + (if (i32.eq (select + (i32.const 10) + (i32.const 20) + (i32.const 1) + ) (i32.const 10)) + (then) + (else (unreachable)) + ) + (if (i32.eq (select + (i32.const 10) + (i32.const 20) + (i32.const 0) + ) (i32.const 20)) + (then) + (else (unreachable)) + ) + ) +) From 64142c4cb865e8cd7f34e3b1c6702722d022f808 Mon Sep 17 00:00:00 2001 From: losfair Date: Thu, 28 Feb 2019 23:22:21 +0800 Subject: [PATCH 038/100] Reserve R15. --- lib/dynasm-backend/src/codegen_x64.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index 6742b64f090..5a1f122e55a 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -99,7 +99,7 @@ impl Register { 9 => R12, 10 => R13, 11 => R14, - 12 => R15, + // 12 => R15, // R15 is reserved for memory base pointer. _ => unreachable!(), } } From e026adf33ec6c60fba8d40d1cd1702caeb770520 Mon Sep 17 00:00:00 2001 From: losfair Date: Thu, 28 Feb 2019 23:58:02 +0800 Subject: [PATCH 039/100] Initial work on linear memory. --- lib/dynasm-backend/src/codegen_x64.rs | 103 +++++++++++++++++++++++++- 1 file changed, 101 insertions(+), 2 deletions(-) diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index 5a1f122e55a..fb4622e1fcc 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -19,7 +19,7 @@ use wasmer_runtime_core::{ use wasmparser::{Operator, Type as WpType}; lazy_static! { - static ref CALL_WASM: unsafe extern "C" fn(params: *const u8, params_len: usize, target: *const u8) -> i64 = { + static ref CALL_WASM: unsafe extern "C" fn(params: *const u8, params_len: usize, target: *const u8, memory_base: *mut u8) -> i64 = { let mut assembler = Assembler::new().unwrap(); let offset = assembler.offset(); dynasm!( @@ -29,6 +29,7 @@ lazy_static! 
{ ; push r13 ; push r14 ; push r15 + ; mov r15, rcx // memory_base ; lea rax, [>after_call] ; push rax ; push rbp @@ -221,7 +222,19 @@ impl ProtectedCaller for X64ExecutionContext { } } - let ret = unsafe { CALL_WASM(param_buf.as_ptr(), param_buf.len(), ptr) }; + let memory_base: *mut u8 = if _module.info.memories.len() > 0 { + if _module.info.memories.len() != 1 { + return Err(RuntimeError::User { + msg: "only one linear memory is supported".into(), + }); + } + unsafe { (**(*_vmctx).memories).base } + } else { + ::std::ptr::null_mut() + }; + //println!("MEMORY = {:?}", memory_base); + + let ret = unsafe { CALL_WASM(param_buf.as_ptr(), param_buf.len(), ptr, memory_base) }; Ok(if let Some(ty) = return_ty { vec![Value::I64(ret)] } else { @@ -1027,6 +1040,27 @@ impl X64FunctionCode { Ok(()) } + + fn emit_memory_load( + assembler: &mut Assembler, + value_stack: &mut ValueStack, + f: F, + out_ty: WpType, + ) -> Result<(), CodegenError> { + let ty = Self::emit_pop_into_ax(assembler, value_stack)?; + if ty != WpType::I32 { + return Err(CodegenError { + message: "memory address must be i32", + }); + } + dynasm!( + assembler + ; add rax, r15 + ); + f(assembler); + Self::emit_push_from_ax(assembler, value_stack, out_ty)?; + Ok(()) + } } impl FunctionCodeGenerator for X64FunctionCode { @@ -1764,6 +1798,71 @@ impl FunctionCodeGenerator for X64FunctionCode { self.br_table_data.as_mut().unwrap().push(table); self.unreachable_depth = 1; } + Operator::I32Load { memarg } => { + Self::emit_memory_load( + assembler, + &mut self.value_stack, + |assembler| { + dynasm!( + assembler + ; mov eax, [rax + memarg.offset as i32] + ); + }, + WpType::I32, + )?; + } + Operator::I32Load8U { memarg } => { + Self::emit_memory_load( + assembler, + &mut self.value_stack, + |assembler| { + dynasm!( + assembler + ; movzx eax, BYTE [rax + memarg.offset as i32] + ); + }, + WpType::I32, + )?; + } + Operator::I32Load8S { memarg } => { + Self::emit_memory_load( + assembler, + &mut self.value_stack, + |assembler| { + dynasm!( + assembler + ; movsx eax, BYTE [rax + memarg.offset as i32] + ); + }, + WpType::I32, + )?; + } + Operator::I32Load16U { memarg } => { + Self::emit_memory_load( + assembler, + &mut self.value_stack, + |assembler| { + dynasm!( + assembler + ; movzx eax, WORD [rax + memarg.offset as i32] + ); + }, + WpType::I32, + )?; + } + Operator::I32Load16S { memarg } => { + Self::emit_memory_load( + assembler, + &mut self.value_stack, + |assembler| { + dynasm!( + assembler + ; movsx eax, WORD [rax + memarg.offset as i32] + ); + }, + WpType::I32, + )?; + } _ => unimplemented!(), } Ok(()) From aa75994e2f8a2f1371e2351b531277ce4586a122 Mon Sep 17 00:00:00 2001 From: losfair Date: Sat, 2 Mar 2019 01:41:31 +0800 Subject: [PATCH 040/100] Add memory opcodes and test. 
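The memory lowering in this commit computes the effective address as the linear-memory base held in r15 plus the Wasm address and the static offset, and the i32 loads and stores are plain little-endian accesses on that flat buffer. The memory.wat test below relies on an overlapping read: storing an i32 at address 16 and loading an i32 back at address 14 must see the stored value shifted up by 16 bits. A minimal sketch of that byte-level behaviour on an ordinary Vec<u8>; the helper names are illustrative:

    use std::convert::TryInto;

    fn store_i32(mem: &mut [u8], addr: usize, value: i32) {
        mem[addr..addr + 4].copy_from_slice(&value.to_le_bytes());
    }

    fn load_i32(mem: &[u8], addr: usize) -> i32 {
        i32::from_le_bytes(mem[addr..addr + 4].try_into().unwrap())
    }

    fn main() {
        let mut mem = vec![0u8; 64];
        store_i32(&mut mem, 16, 10);
        // Bytes 14..18 are [0, 0, 10, 0]: the low two bytes come from untouched
        // memory, so the loaded value is 10 << 16 = 655360, as memory.wat expects.
        assert_eq!(load_i32(&mem, 14), 10 << 16);
    }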
--- examples/single_pass_tests/memory.wat | 90 ++++++++++++ lib/dynasm-backend/src/codegen_x64.rs | 190 ++++++++++++++++++++++---- 2 files changed, 255 insertions(+), 25 deletions(-) create mode 100644 examples/single_pass_tests/memory.wat diff --git a/examples/single_pass_tests/memory.wat b/examples/single_pass_tests/memory.wat new file mode 100644 index 00000000000..9c15eb1eae8 --- /dev/null +++ b/examples/single_pass_tests/memory.wat @@ -0,0 +1,90 @@ +(module + (memory 1) + (func $main (export "main") + (call $test_stack_layout) + ) + + (func $test_stack_layout + (local $addr i32) + (set_local $addr (i32.const 16)) + + (i32.store (get_local $addr) (i32.const 10)) + (if (i32.eq (i32.load (i32.const 14)) (i32.const 655360)) + (then) + (else (unreachable)) + ) + + (i32.const 1) + (i32.store (get_local $addr) (i32.const 11)) + (if (i32.eq (i32.load (i32.const 14)) (i32.const 720896)) + (then) + (else (unreachable)) + ) + + (i32.const 1) + (i32.store (get_local $addr) (i32.const 12)) + (if (i32.eq (i32.load (i32.const 14)) (i32.const 786432)) + (then) + (else (unreachable)) + ) + + (i32.const 1) + (i32.store (get_local $addr) (i32.const 13)) + (if (i32.eq (i32.load (i32.const 14)) (i32.const 851968)) + (then) + (else (unreachable)) + ) + + (i32.const 1) + (i32.store (get_local $addr) (i32.const 14)) + (if (i32.eq (i32.load (i32.const 14)) (i32.const 917504)) + (then) + (else (unreachable)) + ) + + (i32.const 1) + (i32.store (get_local $addr) (i32.const 15)) + (if (i32.eq (i32.load (i32.const 14)) (i32.const 983040)) + (then) + (else (unreachable)) + ) + + (i32.const 1) + (i32.store (get_local $addr) (i32.const 16)) + (if (i32.eq (i32.load (i32.const 14)) (i32.const 1048576)) + (then) + (else (unreachable)) + ) + + (i32.const 1) + (i32.store (get_local $addr) (i32.const 17)) + (if (i32.eq (i32.load (i32.const 14)) (i32.const 1114112)) + (then) + (else (unreachable)) + ) + + (i32.const 1) + (i32.store (get_local $addr) (i32.const 18)) + (if (i32.eq (i32.load (i32.const 14)) (i32.const 1179648)) + (then) + (else (unreachable)) + ) + + (i32.const 1) + (i32.store (get_local $addr) (i32.const 19)) + (if (i32.eq (i32.load (i32.const 14)) (i32.const 1245184)) + (then) + (else (unreachable)) + ) + + (drop) + (drop) + (drop) + (drop) + (drop) + (drop) + (drop) + (drop) + (drop) + ) +) diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index fb4622e1fcc..38833a4917f 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -596,9 +596,10 @@ impl X64FunctionCode { Ok(()) } - fn emit_pop_into_ax( + fn emit_pop_into_reg( assembler: &mut Assembler, value_stack: &mut ValueStack, + target: Register, ) -> Result { let val = value_stack.pop()?; match val.location { @@ -606,13 +607,13 @@ impl X64FunctionCode { let reg = Register::from_scratch_reg(x); dynasm!( assembler - ; mov rax, Rq(reg as u8) + ; mov Rq(target as u8), Rq(reg as u8) ); } ValueLocation::Stack => { dynasm!( assembler - ; pop rax + ; pop Rq(target as u8) ); } } @@ -620,10 +621,18 @@ impl X64FunctionCode { Ok(val.ty) } - fn emit_push_from_ax( + fn emit_pop_into_ax( + assembler: &mut Assembler, + value_stack: &mut ValueStack, + ) -> Result { + Self::emit_pop_into_reg(assembler, value_stack, Register::RAX) + } + + fn emit_push_from_reg( assembler: &mut Assembler, value_stack: &mut ValueStack, ty: WpType, + source: Register, ) -> Result<(), CodegenError> { let loc = value_stack.push(ty); match loc { @@ -631,13 +640,13 @@ impl X64FunctionCode { let reg = 
Register::from_scratch_reg(x); dynasm!( assembler - ; mov Rq(reg as u8), rax + ; mov Rq(reg as u8), Rq(source as u8) ); } ValueLocation::Stack => { dynasm!( assembler - ; push rax + ; push Rq(source as u8) ); } } @@ -645,6 +654,14 @@ impl X64FunctionCode { Ok(()) } + fn emit_push_from_ax( + assembler: &mut Assembler, + value_stack: &mut ValueStack, + ty: WpType, + ) -> Result<(), CodegenError> { + Self::emit_push_from_reg(assembler, value_stack, ty, Register::RAX) + } + fn emit_leave_frame( assembler: &mut Assembler, frame: &ControlFrame, @@ -1041,24 +1058,108 @@ impl X64FunctionCode { Ok(()) } - fn emit_memory_load( + fn emit_memory_load( assembler: &mut Assembler, value_stack: &mut ValueStack, f: F, out_ty: WpType, ) -> Result<(), CodegenError> { - let ty = Self::emit_pop_into_ax(assembler, value_stack)?; - if ty != WpType::I32 { + let addr_info = value_stack.pop()?; + let out_loc = value_stack.push(out_ty); + + if addr_info.ty != WpType::I32 { return Err(CodegenError { message: "memory address must be i32", }); } - dynasm!( - assembler - ; add rax, r15 - ); - f(assembler); - Self::emit_push_from_ax(assembler, value_stack, out_ty)?; + + assert_eq!(out_loc, addr_info.location); + + match addr_info.location { + ValueLocation::Register(x) => { + let reg = Register::from_scratch_reg(x); + dynasm!( + assembler + ; add Rq(reg as u8), r15 + ); + f(assembler, reg); + } + ValueLocation::Stack => { + dynasm!( + assembler + ; pop rax + ; add rax, r15 + ); + f(assembler, Register::RAX); + dynasm!( + assembler + ; push rax + ) + } + } + Ok(()) + } + + fn emit_memory_store( + assembler: &mut Assembler, + value_stack: &mut ValueStack, + f: F, + value_ty: WpType, + ) -> Result<(), CodegenError> { + let value_info = value_stack.pop()?; + let addr_info = value_stack.pop()?; + + if addr_info.ty != WpType::I32 { + return Err(CodegenError { + message: "memory address must be i32", + }); + } + + if value_info.ty != value_ty { + return Err(CodegenError { + message: "value type mismatch", + }); + } + + match value_info.location { + ValueLocation::Register(x) => { + let value_reg = Register::from_scratch_reg(x); + let addr_reg = + Register::from_scratch_reg(addr_info.location.get_register().unwrap()); // must be a register + dynasm!( + assembler + ; add Rq(addr_reg as u8), r15 + ); + f(assembler, addr_reg, value_reg); + } + ValueLocation::Stack => { + match addr_info.location { + ValueLocation::Register(x) => { + let addr_reg = Register::from_scratch_reg(x); + dynasm!( + assembler + ; add Rq(addr_reg as u8), r15 + ; pop rax + ); + f(assembler, addr_reg, Register::RAX); + } + ValueLocation::Stack => { + dynasm!( + assembler + ; mov [rsp - 8], rcx // red zone + ; pop rax // value + ; pop rcx // address + ; add rcx, r15 + ); + f(assembler, Register::RCX, Register::RAX); + dynasm!( + assembler + ; mov rcx, [rsp - 24] + ); + } + } + } + } Ok(()) } } @@ -1802,10 +1903,10 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_memory_load( assembler, &mut self.value_stack, - |assembler| { + |assembler, reg| { dynasm!( assembler - ; mov eax, [rax + memarg.offset as i32] + ; mov Rd(reg as u8), [Rq(reg as u8) + memarg.offset as i32] ); }, WpType::I32, @@ -1815,10 +1916,10 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_memory_load( assembler, &mut self.value_stack, - |assembler| { + |assembler, reg| { dynasm!( assembler - ; movzx eax, BYTE [rax + memarg.offset as i32] + ; movzx Rd(reg as u8), BYTE [Rq(reg as u8) + memarg.offset as i32] ); }, WpType::I32, @@ -1828,10 +1929,10 @@ impl 
FunctionCodeGenerator for X64FunctionCode { Self::emit_memory_load( assembler, &mut self.value_stack, - |assembler| { + |assembler, reg| { dynasm!( assembler - ; movsx eax, BYTE [rax + memarg.offset as i32] + ; movsx Rd(reg as u8), BYTE [Rq(reg as u8) + memarg.offset as i32] ); }, WpType::I32, @@ -1841,10 +1942,10 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_memory_load( assembler, &mut self.value_stack, - |assembler| { + |assembler, reg| { dynasm!( assembler - ; movzx eax, WORD [rax + memarg.offset as i32] + ; movzx Rd(reg as u8), WORD [Rq(reg as u8) + memarg.offset as i32] ); }, WpType::I32, @@ -1854,10 +1955,49 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_memory_load( assembler, &mut self.value_stack, - |assembler| { + |assembler, reg| { + dynasm!( + assembler + ; movsx Rd(reg as u8), WORD [Rq(reg as u8) + memarg.offset as i32] + ); + }, + WpType::I32, + )?; + } + Operator::I32Store { memarg } => { + Self::emit_memory_store( + assembler, + &mut self.value_stack, + |assembler, addr_reg, value_reg| { + dynasm!( + assembler + ; mov [Rq(addr_reg as u8) + memarg.offset as i32], Rd(value_reg as u8) + ); + }, + WpType::I32, + )?; + } + Operator::I32Store8 { memarg } => { + Self::emit_memory_store( + assembler, + &mut self.value_stack, + |assembler, addr_reg, value_reg| { + dynasm!( + assembler + ; mov [Rq(addr_reg as u8) + memarg.offset as i32], Rb(value_reg as u8) + ); + }, + WpType::I32, + )?; + } + Operator::I32Store16 { memarg } => { + Self::emit_memory_store( + assembler, + &mut self.value_stack, + |assembler, addr_reg, value_reg| { dynasm!( assembler - ; movsx eax, WORD [rax + memarg.offset as i32] + ; mov [Rq(addr_reg as u8) + memarg.offset as i32], Rw(value_reg as u8) ); }, WpType::I32, From 2962fe25b66aa82505b37ab1cd7af1ca8b2fe2f8 Mon Sep 17 00:00:00 2001 From: losfair Date: Sat, 2 Mar 2019 01:41:44 +0800 Subject: [PATCH 041/100] Add unwinding test. --- examples/single_pass_tests/unwinding.wat | 38 ++++++++++++++++++++++++ 1 file changed, 38 insertions(+) create mode 100644 examples/single_pass_tests/unwinding.wat diff --git a/examples/single_pass_tests/unwinding.wat b/examples/single_pass_tests/unwinding.wat new file mode 100644 index 00000000000..165179808b0 --- /dev/null +++ b/examples/single_pass_tests/unwinding.wat @@ -0,0 +1,38 @@ +(module + (func $main (export "main") + (i32.const 5) + (block (result i32) + (i32.const 10) + (block + (i32.const 20) + (block + (i32.const 50) + (br 1) + ) + (unreachable) + ) + ) + (i32.add) + (if (i32.eq (i32.const 15)) + (then) + (else unreachable) + ) + + (block (result i32) + (i32.const 10) + (block (result i32) + (i32.const 20) + (block + (i32.const 50) + (br 1) + ) + (unreachable) + ) + (i32.add) + ) + (if (i32.eq (i32.const 60)) + (then) + (else unreachable) + ) + ) +) From fa61b66516165770b7afae378747e2d7c5a9136c Mon Sep 17 00:00:00 2001 From: losfair Date: Tue, 5 Mar 2019 00:23:49 +0800 Subject: [PATCH 042/100] Strongly type scratch registers and fixed an unwinding issue. 
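The fix below separates two kinds of numbers that were previously both plain u8: a value's position in the scratch-register sequence, and the x86-64 register encoding that position maps to. A standalone sketch of the newtype idea, with illustrative names rather than the backend's real types:

    // Illustrative only: a wrapped scratch index cannot be passed where a machine
    // register encoding is expected, so Rq(x as u8)-style mix-ups become type errors.
    #[derive(Copy, Clone, Debug)]
    struct ScratchIdx(u8);

    #[derive(Copy, Clone, Debug)]
    enum MachineReg {
        Rdi = 7,
        Rsi = 6,
        Rdx = 2,
    }

    fn to_machine_reg(sr: ScratchIdx) -> MachineReg {
        // The real backend maps around a dozen scratch slots; three are enough
        // to show the shape of the mapping.
        match sr.0 {
            0 => MachineReg::Rdi,
            1 => MachineReg::Rsi,
            2 => MachineReg::Rdx,
            _ => unreachable!("scratch index out of range"),
        }
    }

    fn main() {
        let slot = ScratchIdx(1);
        // Only the mapped encoding is usable as a register number in emitted code.
        println!("scratch slot {:?} maps to encoding {}", slot, to_machine_reg(slot) as u8);
    }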
--- lib/dynasm-backend/src/codegen_x64.rs | 10 ++++++---- lib/dynasm-backend/src/stack.rs | 19 ++++++++++++++----- 2 files changed, 20 insertions(+), 9 deletions(-) diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index 38833a4917f..610ac9a2fc3 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -1,5 +1,7 @@ use super::codegen::*; -use super::stack::{ControlFrame, ControlStack, IfElseState, ValueInfo, ValueLocation, ValueStack}; +use super::stack::{ + ControlFrame, ControlStack, IfElseState, ScratchRegister, ValueInfo, ValueLocation, ValueStack, +}; use byteorder::{ByteOrder, LittleEndian}; use dynasmrt::{ x64::Assembler, AssemblyOffset, DynamicLabel, DynasmApi, DynasmLabelApi, ExecutableBuffer, @@ -85,9 +87,9 @@ pub enum Register { } impl Register { - pub fn from_scratch_reg(id: u8) -> Register { + pub fn from_scratch_reg(sr: ScratchRegister) -> Register { use self::Register::*; - match id { + match sr.raw_id() { 0 => RDI, 1 => RSI, 2 => RDX, @@ -804,7 +806,7 @@ impl X64FunctionCode { let reg = Register::from_scratch_reg(x); dynasm!( assembler - ; mov Rq(x as u8), rax + ; mov Rq(reg as u8), rax ); } ValueLocation::Stack => { diff --git a/lib/dynasm-backend/src/stack.rs b/lib/dynasm-backend/src/stack.rs index 8e45c6d3827..e78eaa0ed31 100644 --- a/lib/dynasm-backend/src/stack.rs +++ b/lib/dynasm-backend/src/stack.rs @@ -56,10 +56,19 @@ pub struct ValueInfo { #[derive(Copy, Clone, Debug, Eq, PartialEq)] pub enum ValueLocation { - Register(u8), + Register(ScratchRegister), Stack, } +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +pub struct ScratchRegister(u8); + +impl ScratchRegister { + pub fn raw_id(&self) -> u8 { + self.0 + } +} + impl ValueLocation { pub fn is_register(&self) -> bool { if let ValueLocation::Register(_) = *self { @@ -69,7 +78,7 @@ impl ValueLocation { } } - pub fn get_register(&self) -> Result { + pub fn get_register(&self) -> Result { if let ValueLocation::Register(id) = *self { Ok(id) } else { @@ -90,11 +99,11 @@ impl ValueStack { fn next_location(&self, loc: &ValueLocation) -> ValueLocation { match *loc { - ValueLocation::Register(x) => { + ValueLocation::Register(ScratchRegister(x)) => { if x >= self.num_regs - 1 { ValueLocation::Stack } else { - ValueLocation::Register(x + 1) + ValueLocation::Register(ScratchRegister(x + 1)) } } ValueLocation::Stack => ValueLocation::Stack, @@ -106,7 +115,7 @@ impl ValueStack { .values .last() .map(|x| self.next_location(&x.location)) - .unwrap_or(ValueLocation::Register(0)); + .unwrap_or(ValueLocation::Register(ScratchRegister(0))); self.values.push(ValueInfo { ty: ty, location: loc, From bd7698e1f22d0ad280b771305370a1b4a0f3984b Mon Sep 17 00:00:00 2001 From: losfair Date: Tue, 5 Mar 2019 00:59:05 +0800 Subject: [PATCH 043/100] 64-bit operators. --- lib/dynasm-backend/src/codegen_x64.rs | 524 +++++++++++++++++++++++++- 1 file changed, 513 insertions(+), 11 deletions(-) diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index 610ac9a2fc3..c22f00d03a8 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -383,18 +383,20 @@ impl X64FunctionCode { } /// Emits a unary operator. 
- fn emit_unop_i32( + fn emit_unop( assembler: &mut Assembler, value_stack: &mut ValueStack, f: F, + in_ty: WpType, + out_ty: WpType, ) -> Result<(), CodegenError> { let a = value_stack.pop()?; - if a.ty != WpType::I32 { + if a.ty != in_ty { return Err(CodegenError { message: "unop(i32) type mismatch", }); } - value_stack.push(WpType::I32); + value_stack.push(out_ty); match a.location { ValueLocation::Register(x) => { @@ -404,12 +406,12 @@ impl X64FunctionCode { ValueLocation::Stack => { dynasm!( assembler - ; mov eax, [rsp] + ; mov rax, [rsp] ); f(assembler, value_stack, Register::RAX); dynasm!( assembler - ; mov [rsp], eax + ; mov [rsp], rax ); } } @@ -417,21 +419,39 @@ impl X64FunctionCode { Ok(()) } + fn emit_unop_i32( + assembler: &mut Assembler, + value_stack: &mut ValueStack, + f: F, + ) -> Result<(), CodegenError> { + Self::emit_unop(assembler, value_stack, f, WpType::I32, WpType::I32) + } + + fn emit_unop_i64( + assembler: &mut Assembler, + value_stack: &mut ValueStack, + f: F, + ) -> Result<(), CodegenError> { + Self::emit_unop(assembler, value_stack, f, WpType::I64, WpType::I64) + } + /// Emits a binary operator. /// /// Guarantees that the first Register parameter to callback `f` will never be `Register::RAX`. - fn emit_binop_i32( + fn emit_binop( assembler: &mut Assembler, value_stack: &mut ValueStack, f: F, + in_ty: WpType, + out_ty: WpType, ) -> Result<(), CodegenError> { let (a, b) = value_stack.pop2()?; - if a.ty != WpType::I32 || b.ty != WpType::I32 { + if a.ty != in_ty || b.ty != in_ty { return Err(CodegenError { message: "binop(i32) type mismatch", }); } - value_stack.push(WpType::I32); + value_stack.push(out_ty); if a.location.is_register() && b.location.is_register() { // output is in a_reg. @@ -458,13 +478,13 @@ impl X64FunctionCode { dynasm!( assembler ; push rcx - ; mov ecx, [rsp + 16] - ; mov eax, [rsp + 8] + ; mov rcx, [rsp + 16] + ; mov rax, [rsp + 8] ); f(assembler, value_stack, Register::RCX, Register::RAX); dynasm!( assembler - ; mov [rsp + 16], ecx + ; mov [rsp + 16], rcx ; pop rcx ; add rsp, 8 ); @@ -473,6 +493,22 @@ impl X64FunctionCode { Ok(()) } + fn emit_binop_i32( + assembler: &mut Assembler, + value_stack: &mut ValueStack, + f: F, + ) -> Result<(), CodegenError> { + Self::emit_binop(assembler, value_stack, f, WpType::I32, WpType::I32) + } + + fn emit_binop_i64( + assembler: &mut Assembler, + value_stack: &mut ValueStack, + f: F, + ) -> Result<(), CodegenError> { + Self::emit_binop(assembler, value_stack, f, WpType::I64, WpType::I64) + } + fn emit_div_i32( assembler: &mut Assembler, value_stack: &ValueStack, @@ -567,6 +603,27 @@ impl X64FunctionCode { ); } + fn emit_cmp_i64( + assembler: &mut Assembler, + left: Register, + right: Register, + f: F, + ) { + dynasm!( + assembler + ; cmp Rq(left as u8), Rq(right as u8) + ); + f(assembler); + dynasm!( + assembler + ; xor Rq(left as u8), Rq(left as u8) + ; jmp >label_end + ; label_true: + ; mov Rq(left as u8), 1 + ; label_end: + ); + } + fn emit_peek_into_ax( assembler: &mut Assembler, value_stack: &ValueStack, @@ -1626,6 +1683,308 @@ impl FunctionCodeGenerator for X64FunctionCode { }, )?; } + Operator::I64Const { value } => { + let location = self.value_stack.push(WpType::I64); + match location { + ValueLocation::Register(x) => { + let reg = Register::from_scratch_reg(x); + dynasm!( + assembler + ; mov Rq(reg as u8), QWORD value + ); + } + ValueLocation::Stack => { + dynasm!( + assembler + ; mov rax, QWORD value + ; push rax + ); + } + } + } + Operator::I64Add => { + Self::emit_binop_i64( + assembler, + 
&mut self.value_stack, + |assembler, value_stack, left, right| { + dynasm!( + assembler + ; add Rq(left as u8), Rq(right as u8) + ) + }, + )?; + } + Operator::I64Sub => { + Self::emit_binop_i64( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + dynasm!( + assembler + ; sub Rq(left as u8), Rq(right as u8) + ) + }, + )?; + } + Operator::I64Mul => { + Self::emit_binop_i64( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + dynasm!( + assembler + ; imul Rq(left as u8), Rq(right as u8) + ) + }, + )?; + } + Operator::I64DivU => { + unimplemented!(); + } + Operator::I64DivS => { + unimplemented!(); + } + Operator::I64RemU => { + unimplemented!(); + } + Operator::I64RemS => { + unimplemented!(); + } + Operator::I64And => { + Self::emit_binop_i64( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + dynasm!( + assembler + ; and Rq(left as u8), Rq(right as u8) + ); + }, + )?; + } + Operator::I64Or => { + Self::emit_binop_i64( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + dynasm!( + assembler + ; or Rq(left as u8), Rq(right as u8) + ); + }, + )?; + } + Operator::I64Eq => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + dynasm!( + assembler + ; cmp Rq(left as u8), Rq(right as u8) + ; lahf + ; shr ax, 14 + ; and eax, 1 + ; mov Rd(left as u8), eax + ); + }, + WpType::I64, + WpType::I32, + )?; + } + Operator::I64Eqz => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, value_stack, reg| { + dynasm!( + assembler + ; cmp Rq(reg as u8), 0 + ; lahf + ; shr ax, 14 + ; and eax, 1 + ); + if reg != Register::RAX { + dynasm!( + assembler + ; mov Rd(reg as u8), eax + ); + } + }, + WpType::I64, + WpType::I32, + )?; + } + // Comparison operators. + // https://en.wikibooks.org/wiki/X86_Assembly/Control_Flow + // TODO: Is reading flag register directly faster? 
+ Operator::I64LtS => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_cmp_i64(assembler, left, right, |assembler| { + dynasm!( + assembler + ; jl >label_true + ); + }); + }, + WpType::I64, + WpType::I32, + )?; + } + Operator::I64LeS => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_cmp_i64(assembler, left, right, |assembler| { + dynasm!( + assembler + ; jle >label_true + ); + }); + }, + WpType::I64, + WpType::I32, + )?; + } + Operator::I64GtS => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_cmp_i64(assembler, left, right, |assembler| { + dynasm!( + assembler + ; jg >label_true + ); + }); + }, + WpType::I64, + WpType::I32, + )?; + } + Operator::I64GeS => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_cmp_i64(assembler, left, right, |assembler| { + dynasm!( + assembler + ; jge >label_true + ); + }); + }, + WpType::I64, + WpType::I32, + )?; + } + Operator::I64LtU => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_cmp_i64(assembler, left, right, |assembler| { + dynasm!( + assembler + ; jb >label_true + ); + }); + }, + WpType::I64, + WpType::I32, + )?; + } + Operator::I64LeU => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_cmp_i64(assembler, left, right, |assembler| { + dynasm!( + assembler + ; jbe >label_true + ); + }); + }, + WpType::I64, + WpType::I32, + )?; + } + Operator::I64GtU => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_cmp_i64(assembler, left, right, |assembler| { + dynasm!( + assembler + ; ja >label_true + ); + }); + }, + WpType::I64, + WpType::I32, + )?; + } + Operator::I64GeU => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_cmp_i64(assembler, left, right, |assembler| { + dynasm!( + assembler + ; jae >label_true + ); + }); + }, + WpType::I64, + WpType::I32, + )?; + } + Operator::I64ExtendSI32 => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, value_stack, reg| { + dynasm!( + assembler + ; movsx Rq(reg as u8), Rd(reg as u8) + ); + }, + WpType::I32, + WpType::I64, + )?; + } + Operator::I64ExtendUI32 => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, value_stack, reg| { + // FIXME: Is it correct to do nothing here? 
+ }, + WpType::I32, + WpType::I64, + )?; + } + Operator::I32WrapI64 => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, value_stack, reg| { + dynasm!( + assembler + ; mov Rd(reg as u8), Rd(reg as u8) // clear upper 32 bits + ); + }, + WpType::I64, + WpType::I32, + )?; + } Operator::Block { ty } => { self.control_stack .as_mut() @@ -2005,6 +2364,149 @@ impl FunctionCodeGenerator for X64FunctionCode { WpType::I32, )?; } + Operator::I64Load { memarg } => { + Self::emit_memory_load( + assembler, + &mut self.value_stack, + |assembler, reg| { + dynasm!( + assembler + ; mov Rq(reg as u8), [Rq(reg as u8) + memarg.offset as i32] + ); + }, + WpType::I64, + )?; + } + Operator::I64Load8U { memarg } => { + Self::emit_memory_load( + assembler, + &mut self.value_stack, + |assembler, reg| { + dynasm!( + assembler + ; movzx Rq(reg as u8), BYTE [Rq(reg as u8) + memarg.offset as i32] + ); + }, + WpType::I64, + )?; + } + Operator::I64Load8S { memarg } => { + Self::emit_memory_load( + assembler, + &mut self.value_stack, + |assembler, reg| { + dynasm!( + assembler + ; movsx Rq(reg as u8), BYTE [Rq(reg as u8) + memarg.offset as i32] + ); + }, + WpType::I64, + )?; + } + Operator::I64Load16U { memarg } => { + Self::emit_memory_load( + assembler, + &mut self.value_stack, + |assembler, reg| { + dynasm!( + assembler + ; movzx Rq(reg as u8), WORD [Rq(reg as u8) + memarg.offset as i32] + ); + }, + WpType::I64, + )?; + } + Operator::I64Load16S { memarg } => { + Self::emit_memory_load( + assembler, + &mut self.value_stack, + |assembler, reg| { + dynasm!( + assembler + ; movsx Rq(reg as u8), WORD [Rq(reg as u8) + memarg.offset as i32] + ); + }, + WpType::I64, + )?; + } + Operator::I64Load32U { memarg } => { + Self::emit_memory_load( + assembler, + &mut self.value_stack, + |assembler, reg| { + dynasm!( + assembler + ; mov Rd(reg as u8), DWORD [Rq(reg as u8) + memarg.offset as i32] + ); + }, + WpType::I64, + )?; + } + Operator::I64Load32S { memarg } => { + Self::emit_memory_load( + assembler, + &mut self.value_stack, + |assembler, reg| { + dynasm!( + assembler + ; movsx Rq(reg as u8), DWORD [Rq(reg as u8) + memarg.offset as i32] + ); + }, + WpType::I64, + )?; + } + Operator::I64Store { memarg } => { + Self::emit_memory_store( + assembler, + &mut self.value_stack, + |assembler, addr_reg, value_reg| { + dynasm!( + assembler + ; mov [Rq(addr_reg as u8) + memarg.offset as i32], Rq(value_reg as u8) + ); + }, + WpType::I64, + )?; + } + Operator::I64Store8 { memarg } => { + Self::emit_memory_store( + assembler, + &mut self.value_stack, + |assembler, addr_reg, value_reg| { + dynasm!( + assembler + ; mov [Rq(addr_reg as u8) + memarg.offset as i32], Rb(value_reg as u8) + ); + }, + WpType::I64, + )?; + } + Operator::I64Store16 { memarg } => { + Self::emit_memory_store( + assembler, + &mut self.value_stack, + |assembler, addr_reg, value_reg| { + dynasm!( + assembler + ; mov [Rq(addr_reg as u8) + memarg.offset as i32], Rw(value_reg as u8) + ); + }, + WpType::I64, + )?; + } + Operator::I64Store32 { memarg } => { + Self::emit_memory_store( + assembler, + &mut self.value_stack, + |assembler, addr_reg, value_reg| { + dynasm!( + assembler + ; mov [Rq(addr_reg as u8) + memarg.offset as i32], Rd(value_reg as u8) + ); + }, + WpType::I64, + )?; + } _ => unimplemented!(), } Ok(()) From 5a97a25e7c70ecd9dbf078d277deedfe80e26ee8 Mon Sep 17 00:00:00 2001 From: losfair Date: Tue, 5 Mar 2019 00:59:49 +0800 Subject: [PATCH 044/100] Add basic test for i64. 
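The test added below mostly exercises the difference between signed and unsigned 64-bit comparisons, plus the i32/i64 conversions. As a cross-check, the values it expects follow directly from plain Rust integer semantics (this is just the arithmetic, not the test harness):

    fn main() {
        let (a, b): (i64, i64) = (-100, 41);
        assert_eq!((a < b) as i32, 1);                   // i64.lt_s
        assert_eq!(((a as u64) < (b as u64)) as i32, 0); // i64.lt_u: -100 becomes a huge unsigned value

        let (c, d): (i64, i64) = (100, -41);
        assert_eq!((c > d) as i32, 1);                   // i64.gt_s
        assert_eq!(((c as u64) > (d as u64)) as i32, 0); // i64.gt_u: -41 becomes a huge unsigned value

        // i32.wrap/i64 keeps only the low 32 bits.
        assert_eq!(0x1_0000_002A_i64 as i32, 42);
        println!("all comparisons match the expectations in i64.wat");
    }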
--- examples/single_pass_tests/i64.wat | 48 ++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) create mode 100644 examples/single_pass_tests/i64.wat diff --git a/examples/single_pass_tests/i64.wat b/examples/single_pass_tests/i64.wat new file mode 100644 index 00000000000..ebe10eb99bd --- /dev/null +++ b/examples/single_pass_tests/i64.wat @@ -0,0 +1,48 @@ +(module + (func $main (export "main") (result i64) + (local $v1 i64) + (block + (i64.const 10) + (set_local $v1) + + (i64.const 42) + (get_local $v1) + (i64.add) + (i64.const 53) + (i64.eq) + (br_if 0) + + (i64.const 1) + (i64.const -100) + (i64.const 41) + (i64.lt_s) + (i64.extend_u/i32) + (i64.sub) + (i32.wrap/i64) + (br_if 0) + + (i64.const -100) + (i64.const 41) + (i64.lt_u) + (br_if 0) + + (i64.const 1) + (i64.const 100) + (i64.const -41) + (i64.gt_s) + (i64.extend_u/i32) + (i64.sub) + (i32.wrap/i64) + (br_if 0) + + (i64.const 100) + (i64.const -41) + (i64.gt_u) + (br_if 0) + + (i64.const 0) + (return) + ) + (unreachable) + ) +) From 27b2061ffdcbe2d3d3d164351634927fe923c1ed Mon Sep 17 00:00:00 2001 From: losfair Date: Wed, 6 Mar 2019 01:16:24 +0800 Subject: [PATCH 045/100] Implemented missing integer operators and fixed division. --- lib/dynasm-backend/src/codegen_x64.rs | 298 ++++++++++++++++++++++---- 1 file changed, 255 insertions(+), 43 deletions(-) diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index c22f00d03a8..faa136ca085 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -517,64 +517,89 @@ impl X64FunctionCode { signed: bool, out: Register, ) { - let dx_used = Register::RDX.is_used(value_stack); - if dx_used { + let dx_save = + Register::RDX.is_used(value_stack) && left != Register::RDX && right != Register::RDX; + if dx_save { dynasm!( assembler ; push rdx ); } - if right == Register::RAX { + dynasm!( + assembler + ; push r15 + ; mov r15d, Rd(right as u8) + ; mov eax, Rd(left as u8) + ; mov edx, 0 + ); + if signed { + dynasm!( + assembler + ; idiv r15d + ); + } else { dynasm!( assembler - ; push rax - ; mov eax, Rd(left as u8) - ; mov edx, 0 - ; mov Rd(left as u8), [rsp] + ; div r15d ); + } + dynasm!( + assembler + ; mov Rd(left as u8), Rd(out as u8) + ; pop r15 + ); - if signed { - dynasm!( - assembler - ; idiv Rd(left as u8) - ); - } else { - dynasm!( - assembler - ; div Rd(left as u8) - ); - } + if dx_save { + dynasm!( + assembler + ; pop rdx + ); + } + } + fn emit_div_i64( + assembler: &mut Assembler, + value_stack: &ValueStack, + left: Register, + right: Register, + signed: bool, + out: Register, + ) { + let dx_save = + Register::RDX.is_used(value_stack) && left != Register::RDX && right != Register::RDX; + if dx_save { dynasm!( assembler - ; mov Rd(left as u8), Rd(out as u8) - ; pop rax + ; push rdx ); - } else { + } + + dynasm!( + assembler + ; push r15 + ; mov r15, Rq(right as u8) + ; mov rax, Rq(left as u8) + ; mov rdx, 0 + ); + if signed { dynasm!( assembler - ; mov eax, Rd(left as u8) - ; mov edx, 0 + ; idiv r15 ); - if signed { - dynasm!( - assembler - ; idiv Rd(right as u8) - ); - } else { - dynasm!( - assembler - ; div Rd(right as u8) - ); - } + } else { dynasm!( assembler - ; mov Rd(left as u8), Rd(out as u8) + ; div r15 ); } + dynasm!( + assembler + ; mov Rq(left as u8), Rq(out as u8) + ; pop r15 + ); - if dx_used { + if dx_save { dynasm!( assembler ; pop rdx @@ -627,7 +652,7 @@ impl X64FunctionCode { fn emit_peek_into_ax( assembler: &mut Assembler, value_stack: &ValueStack, - ) -> Result<(), 
CodegenError> { + ) -> Result { let val = match value_stack.values.last() { Some(x) => *x, None => { @@ -652,7 +677,7 @@ impl X64FunctionCode { } } - Ok(()) + Ok(val.ty) } fn emit_pop_into_reg( @@ -1389,6 +1414,33 @@ impl FunctionCodeGenerator for X64FunctionCode { ); } } + Operator::TeeLocal { local_index } => { + let local_index = local_index as usize; + if local_index >= self.locals.len() { + return Err(CodegenError { + message: "local out of bounds", + }); + } + let local = self.locals[local_index]; + let ty = Self::emit_peek_into_ax(assembler, &self.value_stack)?; + if ty != local.ty { + return Err(CodegenError { + message: "TeeLocal type mismatch", + }); + } + + if is_dword(get_size_of_type(&ty)?) { + dynasm!( + assembler + ; mov [rbp - (local.stack_offset as i32)], eax + ); + } else { + dynasm!( + assembler + ; mov [rbp - (local.stack_offset as i32)], rax + ); + } + } Operator::I32Const { value } => { let location = self.value_stack.push(WpType::I32); match location { @@ -1396,7 +1448,7 @@ impl FunctionCodeGenerator for X64FunctionCode { let reg = Register::from_scratch_reg(x); dynasm!( assembler - ; mov Rq(reg as u8), value + ; mov Rd(reg as u8), value ); } ValueLocation::Stack => { @@ -1547,6 +1599,23 @@ impl FunctionCodeGenerator for X64FunctionCode { }, )?; } + Operator::I32Ne => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + dynasm!( + assembler + ; cmp Rd(left as u8), Rd(right as u8) + ; lahf + ; shr ax, 14 + ; and eax, 1 + ; xor eax, 1 + ; mov Rd(left as u8), eax + ); + }, + )?; + } Operator::I32Eqz => { Self::emit_unop_i32( assembler, @@ -1568,6 +1637,42 @@ impl FunctionCodeGenerator for X64FunctionCode { }, )?; } + Operator::I32Clz => { + Self::emit_unop_i32( + assembler, + &mut self.value_stack, + |assembler, value_stack, reg| { + dynasm!( + assembler + ; lzcnt Rd(reg as u8), Rd(reg as u8) + ); + }, + )?; + } + Operator::I32Ctz => { + Self::emit_unop_i32( + assembler, + &mut self.value_stack, + |assembler, value_stack, reg| { + dynasm!( + assembler + ; tzcnt Rd(reg as u8), Rd(reg as u8) + ); + }, + )?; + } + Operator::I32Popcnt => { + Self::emit_unop_i32( + assembler, + &mut self.value_stack, + |assembler, value_stack, reg| { + dynasm!( + assembler + ; popcnt Rd(reg as u8), Rd(reg as u8) + ); + }, + )?; + } // Comparison operators. // https://en.wikibooks.org/wiki/X86_Assembly/Control_Flow // TODO: Is reading flag register directly faster? 
@@ -1739,16 +1844,68 @@ impl FunctionCodeGenerator for X64FunctionCode { )?; } Operator::I64DivU => { - unimplemented!(); + Self::emit_binop_i64( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_div_i64( + assembler, + value_stack, + left, + right, + false, + Register::RAX, + ); + }, + )?; } Operator::I64DivS => { - unimplemented!(); + Self::emit_binop_i64( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_div_i64( + assembler, + value_stack, + left, + right, + true, + Register::RAX, + ); + }, + )?; } Operator::I64RemU => { - unimplemented!(); + Self::emit_binop_i64( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_div_i64( + assembler, + value_stack, + left, + right, + false, + Register::RDX, + ); + }, + )?; } Operator::I64RemS => { - unimplemented!(); + Self::emit_binop_i64( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_div_i64( + assembler, + value_stack, + left, + right, + true, + Register::RDX, + ); + }, + )?; } Operator::I64And => { Self::emit_binop_i64( @@ -1792,6 +1949,25 @@ impl FunctionCodeGenerator for X64FunctionCode { WpType::I32, )?; } + Operator::I64Ne => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + dynasm!( + assembler + ; cmp Rq(left as u8), Rq(right as u8) + ; lahf + ; shr ax, 14 + ; and eax, 1 + ; xor eax, 1 + ; mov Rd(left as u8), eax + ); + }, + WpType::I64, + WpType::I32, + )?; + } Operator::I64Eqz => { Self::emit_unop( assembler, @@ -1815,6 +1991,42 @@ impl FunctionCodeGenerator for X64FunctionCode { WpType::I32, )?; } + Operator::I64Clz => { + Self::emit_unop_i64( + assembler, + &mut self.value_stack, + |assembler, value_stack, reg| { + dynasm!( + assembler + ; lzcnt Rq(reg as u8), Rq(reg as u8) + ); + }, + )?; + } + Operator::I64Ctz => { + Self::emit_unop_i64( + assembler, + &mut self.value_stack, + |assembler, value_stack, reg| { + dynasm!( + assembler + ; tzcnt Rq(reg as u8), Rq(reg as u8) + ); + }, + )?; + } + Operator::I64Popcnt => { + Self::emit_unop_i64( + assembler, + &mut self.value_stack, + |assembler, value_stack, reg| { + dynasm!( + assembler + ; popcnt Rq(reg as u8), Rq(reg as u8) + ); + }, + )?; + } // Comparison operators. // https://en.wikibooks.org/wiki/X86_Assembly/Control_Flow // TODO: Is reading flag register directly faster? 
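The next patch adds div.wat, which leans on the fact that div_s and div_u interpret the same bit pattern differently. The expected constants can be reproduced in plain Rust, independent of the code generator:

    fn main() {
        // i32.div_s: 2 / -1 == -2
        assert_eq!(2i32.wrapping_div(-1), -2);
        // i32.div_u: -1 reinterpreted as unsigned is 0xFFFF_FFFF, so 2 / 0xFFFF_FFFF == 0
        assert_eq!(2u32 / (-1i32 as u32), 0);
        // i64.div_s: 300000000000 / -1 == -300000000000
        assert_eq!(300_000_000_000i64.wrapping_div(-1), -300_000_000_000);
        // i64.div_u: -1 reinterpreted as unsigned is u64::MAX, so the quotient is 0
        assert_eq!(300_000_000_000u64 / (-1i64 as u64), 0);
        // i64.div_u: 300000000000 / 2 == 150000000000
        assert_eq!(300_000_000_000u64 / 2, 150_000_000_000u64);
        println!("division semantics match div.wat");
    }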
From 8d2e8773e0bc6c809e461f7fc40f4706023988ac Mon Sep 17 00:00:00 2001 From: losfair Date: Wed, 6 Mar 2019 01:16:36 +0800 Subject: [PATCH 046/100] tee_local and div tests --- examples/single_pass_tests/div.wat | 36 ++++++++++++++++++++++++ examples/single_pass_tests/tee_local.wat | 11 ++++++++ 2 files changed, 47 insertions(+) create mode 100644 examples/single_pass_tests/div.wat create mode 100644 examples/single_pass_tests/tee_local.wat diff --git a/examples/single_pass_tests/div.wat b/examples/single_pass_tests/div.wat new file mode 100644 index 00000000000..3ef5d6e53f1 --- /dev/null +++ b/examples/single_pass_tests/div.wat @@ -0,0 +1,36 @@ +(module + (func $main (export "main") + (i32.const 1) + (if (i32.ne (i32.div_s (i32.const 2) (i32.const -1)) (i32.const -2)) + (then unreachable) + ) + (i32.const 2) + (if (i32.ne (i32.div_u (i32.const 2) (i32.const -1)) (i32.const 0)) + (then unreachable) + ) + (i32.const 3) + (if (i32.ne (i32.div_u (i32.const 10) (i32.const 5)) (i32.const 2)) + (then unreachable) + ) + (i32.const 4) + (if (i64.ne (i64.div_s (i64.const 300000000000) (i64.const -1)) (i64.const -300000000000)) + (then unreachable) + ) + (i32.const 5) + (if (i64.ne (i64.div_u (i64.const 300000000000) (i64.const -1)) (i64.const 0)) + (then unreachable) + ) + (i32.const 6) + (if (i64.ne (i64.div_u (i64.const 300000000000) (i64.const 2)) (i64.const 150000000000)) + (then unreachable) + ) + (i32.add) + (i32.add) + (i32.add) + (i32.add) + (i32.add) + (if (i32.ne (i32.const 21)) + (then unreachable) + ) + ) +) diff --git a/examples/single_pass_tests/tee_local.wat b/examples/single_pass_tests/tee_local.wat new file mode 100644 index 00000000000..70b9e4737b0 --- /dev/null +++ b/examples/single_pass_tests/tee_local.wat @@ -0,0 +1,11 @@ +(module + (func $main (export "main") + (local $x i32) + (tee_local $x (i32.const 3)) + (i32.add (i32.const 4)) + (if (i32.eq (i32.const 7)) + (then) + (else unreachable) + ) + ) +) From 12c213739a921e2a4cb339ccb60ea72a3b6e8d75 Mon Sep 17 00:00:00 2001 From: losfair Date: Fri, 8 Mar 2019 01:31:37 +0800 Subject: [PATCH 047/100] Hack around calling imports. Not yet working. 
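The approach here gives every imported function a small per-import trampoline that bakes in the import's index and forwards to one shared native entry point. A rough conceptual model in plain Rust (not the generated machine code; the real argument passing goes through registers and the VM context):

    // Rough model only: each import gets an adapter that captures its index;
    // all adapters funnel into a single dispatch function.
    fn make_import_trampoline(
        import_id: usize,
        dispatch: fn(import_id: usize, args: &[u64]) -> u64,
    ) -> impl Fn(&[u64]) -> u64 {
        move |args| dispatch(import_id, args)
    }

    fn main() {
        fn dispatch(import_id: usize, args: &[u64]) -> u64 {
            // In the real backend this is where the host function pointer would be
            // looked up from the import table and called.
            println!("import #{import_id} called with {args:?}");
            args.iter().sum()
        }

        let call_import_3 = make_import_trampoline(3, dispatch);
        assert_eq!(call_import_3(&[1, 2, 3]), 6);
    }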
--- lib/dynasm-backend/src/codegen.rs | 1 + lib/dynasm-backend/src/codegen_x64.rs | 64 +++++++++++++++++++++++++-- lib/dynasm-backend/src/parse.rs | 8 +++- 3 files changed, 68 insertions(+), 5 deletions(-) diff --git a/lib/dynasm-backend/src/codegen.rs b/lib/dynasm-backend/src/codegen.rs index 8257ceefebb..9c223d0f9bd 100644 --- a/lib/dynasm-backend/src/codegen.rs +++ b/lib/dynasm-backend/src/codegen.rs @@ -18,6 +18,7 @@ pub trait ModuleCodeGenerator { &mut self, assoc: Map, ) -> Result<(), CodegenError>; + fn feed_import_function(&mut self) -> Result<(), CodegenError>; } pub trait FunctionCodeGenerator { diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index faa136ca085..0d3730315b6 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -6,6 +6,7 @@ use byteorder::{ByteOrder, LittleEndian}; use dynasmrt::{ x64::Assembler, AssemblyOffset, DynamicLabel, DynasmApi, DynasmLabelApi, ExecutableBuffer, }; +use std::sync::Mutex; use std::{collections::HashMap, sync::Arc}; use wasmer_runtime_core::{ backend::{Backend, Compiler, FuncResolver, ProtectedCaller, Token, UserTrapper}, @@ -134,12 +135,21 @@ pub struct NativeTrampolines { trap_unreachable: DynamicLabel, } +#[repr(transparent)] +struct CtxPtr(*mut vm::Ctx); + +unsafe impl Send for CtxPtr {} +unsafe impl Sync for CtxPtr {} + pub struct X64ModuleCodeGenerator { functions: Vec, signatures: Option>>>, function_signatures: Option>>, + function_labels: Option>, assembler: Option, native_trampolines: Arc, + vmctx: Mutex>, // TODO: Fix this + func_import_count: usize, } pub struct X64FunctionCode { @@ -166,6 +176,8 @@ pub struct X64ExecutionContext { code: ExecutableBuffer, functions: Vec, br_table_data: Vec>, + vmctx: Mutex>, + func_import_count: usize, } impl ProtectedCaller for X64ExecutionContext { @@ -178,7 +190,7 @@ impl ProtectedCaller for X64ExecutionContext { _vmctx: *mut vm::Ctx, _: Token, ) -> RuntimeResult> { - let index = _func_index.index(); + let index = _func_index.index() - self.func_import_count; let ptr = self.code.ptr(self.functions[index].begin_offset); let return_ty = self.functions[index].returns.last().cloned(); @@ -236,6 +248,8 @@ impl ProtectedCaller for X64ExecutionContext { }; //println!("MEMORY = {:?}", memory_base); + self.vmctx.lock().unwrap().0 = _vmctx; + let ret = unsafe { CALL_WASM(param_buf.as_ptr(), param_buf.len(), ptr, memory_base) }; Ok(if let Some(ty) = return_ty { vec![Value::I64(ret)] @@ -279,8 +293,11 @@ impl X64ModuleCodeGenerator { functions: vec![], signatures: None, function_signatures: None, + function_labels: Some(HashMap::new()), assembler: Some(assembler), native_trampolines: Arc::new(nt), + vmctx: Mutex::new(Box::new(CtxPtr(::std::ptr::null_mut()))), + func_import_count: 0, } } } @@ -293,10 +310,14 @@ impl ModuleCodeGenerator for X64ModuleCode x.function_labels.take().unwrap(), x.br_table_data.take().unwrap(), ), - None => (self.assembler.take().unwrap(), HashMap::new(), vec![]), + None => ( + self.assembler.take().unwrap(), + self.function_labels.take().unwrap(), + vec![], + ), }; let begin_label = *function_labels - .entry(self.functions.len()) + .entry(self.functions.len() + self.func_import_count) .or_insert_with(|| assembler.new_dynamic_label()); let begin_offset = assembler.offset(); dynasm!( @@ -347,6 +368,8 @@ impl ModuleCodeGenerator for X64ModuleCode code: output, functions: self.functions, br_table_data: br_table_data, + vmctx: self.vmctx, + func_import_count: self.func_import_count, }) } @@ -365,6 
+388,32 @@ impl ModuleCodeGenerator for X64ModuleCode self.function_signatures = Some(Arc::new(assoc)); Ok(()) } + + fn feed_import_function(&mut self) -> Result<(), CodegenError> { + let labels = match self.function_labels.as_mut() { + Some(x) => x, + None => { + return Err(CodegenError { + message: "got function import after code", + }) + } + }; + let id = labels.len(); + + let mut vmctx = self.vmctx.lock().unwrap(); + + let label = X64FunctionCode::emit_native_call_trampoline( + self.assembler.as_mut().unwrap(), + invoke_import, + &mut vmctx.0 as *mut *mut vm::Ctx, + id, + ); + labels.insert(id, label); + + self.func_import_count += 1; + + Ok(()) + } } impl X64FunctionCode { @@ -2786,3 +2835,12 @@ unsafe extern "C" fn do_trap( ) -> u64 { panic!("TRAP CODE: {:?}", ctx2); } + +unsafe extern "C" fn invoke_import( + ctx1: *mut *mut vm::Ctx, + ctx2: usize, + stack_top: *mut u8, + stack_base: *mut u8, +) -> u64 { + panic!("INVOKE IMPORT: {}, under context {:?}", ctx2, *ctx1); +} diff --git a/lib/dynasm-backend/src/parse.rs b/lib/dynasm-backend/src/parse.rs index 87a0e213f3e..4b7aeee2d27 100644 --- a/lib/dynasm-backend/src/parse.rs +++ b/lib/dynasm-backend/src/parse.rs @@ -112,6 +112,7 @@ pub fn read_module< let sigindex = SigIndex::new(sigindex as usize); info.imported_functions.push(import_name); info.func_assoc.push(sigindex); + mcg.feed_import_function()?; } ImportSectionEntryType::Table(table_ty) => { assert_eq!(table_ty.element_type, WpType::AnyFunc); @@ -140,6 +141,9 @@ pub fn read_module< } } } + + info.namespace_table = namespace_builder.finish(); + info.name_table = name_builder.finish(); } SectionCode::Function => { let func_decl_reader = section.get_function_section_reader()?; @@ -267,9 +271,9 @@ pub fn read_module< } SectionCode::Code => { let mut code_reader = section.get_code_section_reader()?; - if code_reader.get_count() as usize != info.func_assoc.len() { + if code_reader.get_count() as usize > info.func_assoc.len() { return Err(BinaryReaderError { - message: "code_reader.get_count() != info.func_assoc.len()", + message: "code_reader.get_count() > info.func_assoc.len()", offset: ::std::usize::MAX, } .into()); From 258dea64d8a0dde107ee3f2d72f94b6e3a13917f Mon Sep 17 00:00:00 2001 From: losfair Date: Sat, 9 Mar 2019 00:07:13 +0800 Subject: [PATCH 048/100] Fix calling imports. --- lib/dynasm-backend/src/codegen_x64.rs | 111 +++++++++++++++++++++----- lib/dynasm-backend/src/parse.rs | 7 +- 2 files changed, 96 insertions(+), 22 deletions(-) diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index 0d3730315b6..275857d11fa 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -22,7 +22,7 @@ use wasmer_runtime_core::{ use wasmparser::{Operator, Type as WpType}; lazy_static! { - static ref CALL_WASM: unsafe extern "C" fn(params: *const u8, params_len: usize, target: *const u8, memory_base: *mut u8) -> i64 = { + static ref CALL_WASM: unsafe extern "C" fn(params: *const u8, params_len: usize, target: *const u8, memory_base: *mut u8, vmctx: *mut vm::Ctx) -> i64 = { let mut assembler = Assembler::new().unwrap(); let offset = assembler.offset(); dynasm!( @@ -33,6 +33,7 @@ lazy_static! { ; push r14 ; push r15 ; mov r15, rcx // memory_base + ; mov r14, r8 // vmctx ; lea rax, [>after_call] ; push rax ; push rbp @@ -64,6 +65,73 @@ lazy_static! 
{ ::std::mem::forget(buf); ret }; + + static ref CONSTRUCT_STACK_AND_CALL_NATIVE: unsafe extern "C" fn (stack_top: *mut u8, stack_base: *mut u8, ctx: *mut vm::Ctx, target: *const vm::Func) -> u64 = { + let mut assembler = Assembler::new().unwrap(); + let offset = assembler.offset(); + dynasm!( + assembler + ; push r15 + ; push r14 + ; push r13 + ; push r12 + ; sub rsp, 8 // align to 16 bytes + + ; mov r15, rdi + ; mov r14, rsi + ; mov r13, rdx + ; mov r12, rcx + + ; mov rdi, r13 + ; cmp r15, r14 + ; je >stack_ready + ; mov rdi, [r15] + ; add r15, 8 + + ; mov rsi, r13 + ; cmp r15, r14 + ; je >stack_ready + ; mov rsi, [r15] + ; add r15, 8 + + ; mov rdx, r13 + ; cmp r15, r14 + ; je >stack_ready + ; mov rdx, [r15] + ; add r15, 8 + + ; mov rcx, r13 + ; cmp r15, r14 + ; je >stack_ready + ; mov rcx, [r15] + ; add r15, 8 + + ; mov r8, r13 + ; cmp r15, r14 + ; je >stack_ready + ; mov r8, [r15] + ; add r15, 8 + + ; mov r9, r13 + ; cmp r15, r14 + ; je >stack_ready + ; ud2 // FIXME + + ; stack_ready: + ; call r12 + + ; add rsp, 8 + ; pop r12 + ; pop r13 + ; pop r14 + ; pop r15 + ; ret + ); + let buf = assembler.finalize().unwrap(); + let ret = unsafe { ::std::mem::transmute(buf.ptr(offset)) }; + ::std::mem::forget(buf); + ret + }; } #[repr(u8)] @@ -102,7 +170,7 @@ impl Register { 8 => RBX, 9 => R12, 10 => R13, - 11 => R14, + // 11 => R14, // R14 is reserved for vmctx. // 12 => R15, // R15 is reserved for memory base pointer. _ => unreachable!(), } @@ -148,7 +216,6 @@ pub struct X64ModuleCodeGenerator { function_labels: Option>, assembler: Option, native_trampolines: Arc, - vmctx: Mutex>, // TODO: Fix this func_import_count: usize, } @@ -176,7 +243,6 @@ pub struct X64ExecutionContext { code: ExecutableBuffer, functions: Vec, br_table_data: Vec>, - vmctx: Mutex>, func_import_count: usize, } @@ -248,9 +314,15 @@ impl ProtectedCaller for X64ExecutionContext { }; //println!("MEMORY = {:?}", memory_base); - self.vmctx.lock().unwrap().0 = _vmctx; - - let ret = unsafe { CALL_WASM(param_buf.as_ptr(), param_buf.len(), ptr, memory_base) }; + let ret = unsafe { + CALL_WASM( + param_buf.as_ptr(), + param_buf.len(), + ptr, + memory_base, + _vmctx, + ) + }; Ok(if let Some(ty) = return_ty { vec![Value::I64(ret)] } else { @@ -296,7 +368,6 @@ impl X64ModuleCodeGenerator { function_labels: Some(HashMap::new()), assembler: Some(assembler), native_trampolines: Arc::new(nt), - vmctx: Mutex::new(Box::new(CtxPtr(::std::ptr::null_mut()))), func_import_count: 0, } } @@ -368,7 +439,6 @@ impl ModuleCodeGenerator for X64ModuleCode code: output, functions: self.functions, br_table_data: br_table_data, - vmctx: self.vmctx, func_import_count: self.func_import_count, }) } @@ -400,12 +470,10 @@ impl ModuleCodeGenerator for X64ModuleCode }; let id = labels.len(); - let mut vmctx = self.vmctx.lock().unwrap(); - let label = X64FunctionCode::emit_native_call_trampoline( self.assembler.as_mut().unwrap(), invoke_import, - &mut vmctx.0 as *mut *mut vm::Ctx, + 0, id, ); labels.insert(id, label); @@ -1029,6 +1097,7 @@ impl X64FunctionCode { ctx2: B, stack_top: *mut u8, stack_base: *mut u8, + vmctx: *mut vm::Ctx, ) -> u64, ctx1: A, ctx2: B, @@ -1050,18 +1119,13 @@ impl X64FunctionCode { ; mov rsi, QWORD (unsafe { ::std::mem::transmute_copy::(&ctx2) }) ; mov rdx, rsp ; mov rcx, rbp - ; push rbp - ; mov rbp, rsp + ; mov r8, r14 // vmctx ; mov rax, QWORD (0xfffffffffffffff0u64 as i64) ; and rsp, rax ; mov rax, QWORD (target as i64) ; call rax ; mov rsp, rbp ; pop rbp - ); - - dynasm!( - assembler ; ret ); @@ -2832,15 +2896,20 @@ unsafe extern "C" 
fn do_trap( ctx2: TrapCode, stack_top: *mut u8, stack_base: *mut u8, + vmctx: *mut vm::Ctx, ) -> u64 { panic!("TRAP CODE: {:?}", ctx2); } unsafe extern "C" fn invoke_import( - ctx1: *mut *mut vm::Ctx, - ctx2: usize, + _unused: usize, + import_id: usize, stack_top: *mut u8, stack_base: *mut u8, + vmctx: *mut vm::Ctx, ) -> u64 { - panic!("INVOKE IMPORT: {}, under context {:?}", ctx2, *ctx1); + let vmctx: &mut vm::Ctx = &mut *vmctx; + let import = (*vmctx.imported_funcs.offset(import_id as isize)).func; + + CONSTRUCT_STACK_AND_CALL_NATIVE(stack_top, stack_base, vmctx, import) } diff --git a/lib/dynasm-backend/src/parse.rs b/lib/dynasm-backend/src/parse.rs index 4b7aeee2d27..1fd185d954d 100644 --- a/lib/dynasm-backend/src/parse.rs +++ b/lib/dynasm-backend/src/parse.rs @@ -283,7 +283,12 @@ pub fn read_module< let mut fcg = mcg.next_function()?; let sig = info .signatures - .get(*info.func_assoc.get(FuncIndex::new(i as usize)).unwrap()) + .get( + *info + .func_assoc + .get(FuncIndex::new(i as usize + info.imported_functions.len())) + .unwrap(), + ) .unwrap(); for ret in sig.returns() { fcg.feed_return(type_to_wp_type(*ret))?; From 4c4743e7cd096b6a995582d6177a04478ba3768b Mon Sep 17 00:00:00 2001 From: losfair Date: Sat, 9 Mar 2019 00:32:18 +0800 Subject: [PATCH 049/100] Globals. --- lib/dynasm-backend/src/codegen.rs | 3 +- lib/dynasm-backend/src/codegen_x64.rs | 65 ++++++++++++++++++++++----- lib/dynasm-backend/src/parse.rs | 2 +- 3 files changed, 58 insertions(+), 12 deletions(-) diff --git a/lib/dynasm-backend/src/codegen.rs b/lib/dynasm-backend/src/codegen.rs index 9c223d0f9bd..b9cbec184be 100644 --- a/lib/dynasm-backend/src/codegen.rs +++ b/lib/dynasm-backend/src/codegen.rs @@ -4,6 +4,7 @@ use wasmer_runtime_core::{ structures::Map, types::{FuncIndex, FuncSig, SigIndex}, units::Pages, + module::ModuleInfo, }; use wasmparser::{Operator, Type as WpType}; @@ -26,7 +27,7 @@ pub trait FunctionCodeGenerator { fn feed_param(&mut self, ty: WpType) -> Result<(), CodegenError>; fn feed_local(&mut self, ty: WpType, n: usize) -> Result<(), CodegenError>; fn begin_body(&mut self) -> Result<(), CodegenError>; - fn feed_opcode(&mut self, op: Operator) -> Result<(), CodegenError>; + fn feed_opcode(&mut self, op: Operator, module_info: &ModuleInfo) -> Result<(), CodegenError>; fn finalize(&mut self) -> Result<(), CodegenError>; } diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index 275857d11fa..adbacfa5601 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -14,10 +14,10 @@ use wasmer_runtime_core::{ module::{ModuleInfo, ModuleInner, StringTable}, structures::{Map, TypedIndex}, types::{ - FuncIndex, FuncSig, GlobalIndex, LocalFuncIndex, MemoryIndex, SigIndex, TableIndex, Type, - Value, + FuncIndex, FuncSig, GlobalIndex, LocalFuncIndex, LocalGlobalIndex, MemoryIndex, SigIndex, + TableIndex, Type, Value, }, - vm::{self, ImportBacking}, + vm::{self, ImportBacking, LocalGlobal}, }; use wasmparser::{Operator, Type as WpType}; @@ -203,12 +203,6 @@ pub struct NativeTrampolines { trap_unreachable: DynamicLabel, } -#[repr(transparent)] -struct CtxPtr(*mut vm::Ctx); - -unsafe impl Send for CtxPtr {} -unsafe impl Sync for CtxPtr {} - pub struct X64ModuleCodeGenerator { functions: Vec, signatures: Option>>>, @@ -1434,7 +1428,7 @@ impl FunctionCodeGenerator for X64FunctionCode { )); Ok(()) } - fn feed_opcode(&mut self, op: Operator) -> Result<(), CodegenError> { + fn feed_opcode(&mut self, op: Operator, module_info: &ModuleInfo) -> 
Result<(), CodegenError> { let was_unreachable; if self.unreachable_depth > 0 { @@ -1458,6 +1452,57 @@ impl FunctionCodeGenerator for X64FunctionCode { let assembler = self.assembler.as_mut().unwrap(); match op { + Operator::GetGlobal { global_index } => { + let global_index = global_index as usize; + if global_index >= module_info.globals.len() { + return Err(CodegenError { + message: "global out of bounds", + }); + } + dynasm!( + assembler + ; mov rax, r14 => vm::Ctx.globals + ; mov rax, [rax + (global_index as i32) * 8] + ; mov rax, rax => LocalGlobal.data + ); + Self::emit_push_from_ax( + assembler, + &mut self.value_stack, + type_to_wp_type( + module_info.globals[LocalGlobalIndex::new(global_index)] + .desc + .ty, + ), + )?; + } + Operator::SetGlobal { global_index } => { + let global_index = global_index as usize; + if global_index >= module_info.globals.len() { + return Err(CodegenError { + message: "global out of bounds", + }); + } + let ty = Self::emit_pop_into_ax(assembler, &mut self.value_stack)?; + if ty + != type_to_wp_type( + module_info.globals[LocalGlobalIndex::new(global_index)] + .desc + .ty, + ) + { + return Err(CodegenError { + message: "type mismatch in SetGlobal", + }); + } + dynasm!( + assembler + ; push rbx + ; mov rbx, r14 => vm::Ctx.globals + ; mov rbx, [rbx + (global_index as i32) * 8] + ; mov rbx => LocalGlobal.data, rax + ; pop rbx + ); + } Operator::GetLocal { local_index } => { let local_index = local_index as usize; if local_index >= self.locals.len() { diff --git a/lib/dynasm-backend/src/parse.rs b/lib/dynasm-backend/src/parse.rs index 1fd185d954d..ddd16ea5024 100644 --- a/lib/dynasm-backend/src/parse.rs +++ b/lib/dynasm-backend/src/parse.rs @@ -303,7 +303,7 @@ pub fn read_module< fcg.begin_body()?; for op in item.get_operators_reader()? { let op = op?; - fcg.feed_opcode(op)?; + fcg.feed_opcode(op, &info)?; } fcg.finalize()?; } From 3efccbe0f7875b4c1be4bc62a03dcb71a765d80d Mon Sep 17 00:00:00 2001 From: losfair Date: Sat, 9 Mar 2019 00:38:13 +0800 Subject: [PATCH 050/100] Support imported globals. 
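The change below follows the Wasm index-space rule: a single global_index first covers imported globals, then the module's own globals. A small standalone illustration of that split, with made-up names:

    // Illustrative only: indices [0, num_imported) are imported globals,
    // [num_imported, num_imported + num_local) are locally defined ones.
    #[derive(Debug, PartialEq)]
    enum GlobalRef {
        Imported(usize),
        Local(usize),
    }

    fn classify_global(global_index: usize, num_imported: usize, num_local: usize) -> Option<GlobalRef> {
        if global_index < num_imported {
            Some(GlobalRef::Imported(global_index))
        } else if global_index - num_imported < num_local {
            Some(GlobalRef::Local(global_index - num_imported))
        } else {
            None // out of bounds, as the codegen error path reports
        }
    }

    fn main() {
        assert_eq!(classify_global(0, 2, 3), Some(GlobalRef::Imported(0)));
        assert_eq!(classify_global(2, 2, 3), Some(GlobalRef::Local(0)));
        assert_eq!(classify_global(5, 2, 3), None);
    }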
--- lib/dynasm-backend/src/codegen_x64.rs | 54 ++++++++++++++++++++------- 1 file changed, 40 insertions(+), 14 deletions(-) diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index adbacfa5601..3da9f33840c 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -1453,15 +1453,27 @@ impl FunctionCodeGenerator for X64FunctionCode { match op { Operator::GetGlobal { global_index } => { - let global_index = global_index as usize; - if global_index >= module_info.globals.len() { - return Err(CodegenError { - message: "global out of bounds", - }); + let mut global_index = global_index as usize; + if global_index < module_info.imported_globals.len() { + dynasm!( + assembler + ; mov rax, r14 => vm::Ctx.imported_globals + ); + } else { + global_index -= module_info.imported_globals.len(); + if global_index >= module_info.globals.len() { + return Err(CodegenError { + message: "global out of bounds", + }); + } + dynasm!( + assembler + ; mov rax, r14 => vm::Ctx.globals + ); } + dynasm!( assembler - ; mov rax, r14 => vm::Ctx.globals ; mov rax, [rax + (global_index as i32) * 8] ; mov rax, rax => LocalGlobal.data ); @@ -1476,13 +1488,29 @@ impl FunctionCodeGenerator for X64FunctionCode { )?; } Operator::SetGlobal { global_index } => { - let global_index = global_index as usize; - if global_index >= module_info.globals.len() { - return Err(CodegenError { - message: "global out of bounds", - }); - } let ty = Self::emit_pop_into_ax(assembler, &mut self.value_stack)?; + + let mut global_index = global_index as usize; + if global_index < module_info.imported_globals.len() { + dynasm!( + assembler + ; push rbx + ; mov rbx, r14 => vm::Ctx.imported_globals + ); + } else { + global_index -= module_info.imported_globals.len(); + if global_index >= module_info.globals.len() { + return Err(CodegenError { + message: "global out of bounds", + }); + } + dynasm!( + assembler + ; push rbx + ; mov rbx, r14 => vm::Ctx.globals + ); + } + if ty != type_to_wp_type( module_info.globals[LocalGlobalIndex::new(global_index)] @@ -1496,8 +1524,6 @@ impl FunctionCodeGenerator for X64FunctionCode { } dynasm!( assembler - ; push rbx - ; mov rbx, r14 => vm::Ctx.globals ; mov rbx, [rbx + (global_index as i32) * 8] ; mov rbx => LocalGlobal.data, rax ; pop rbx From 25034ece07a2cf40d0429b7ef7f9613a5c90b0e1 Mon Sep 17 00:00:00 2001 From: losfair Date: Sat, 9 Mar 2019 02:57:23 +0800 Subject: [PATCH 051/100] Almost finished indirect calls. --- lib/dynasm-backend/src/codegen_x64.rs | 202 ++++++++++++++++++++++++-- lib/dynasm-backend/src/lib.rs | 3 +- 2 files changed, 194 insertions(+), 11 deletions(-) diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index 3da9f33840c..175a3808c24 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -6,6 +6,8 @@ use byteorder::{ByteOrder, LittleEndian}; use dynasmrt::{ x64::Assembler, AssemblyOffset, DynamicLabel, DynasmApi, DynasmLabelApi, ExecutableBuffer, }; +use std::cell::RefCell; +use std::ptr::NonNull; use std::sync::Mutex; use std::{collections::HashMap, sync::Arc}; use wasmer_runtime_core::{ @@ -17,10 +19,14 @@ use wasmer_runtime_core::{ FuncIndex, FuncSig, GlobalIndex, LocalFuncIndex, LocalGlobalIndex, MemoryIndex, SigIndex, TableIndex, Type, Value, }, - vm::{self, ImportBacking, LocalGlobal}, + vm::{self, ImportBacking, LocalGlobal, LocalTable}, }; use wasmparser::{Operator, Type as WpType}; +thread_local! 
{ + static CURRENT_EXECUTION_CONTEXT: RefCell> = RefCell::new(Vec::new()); +} + lazy_static! { static ref CALL_WASM: unsafe extern "C" fn(params: *const u8, params_len: usize, target: *const u8, memory_base: *mut u8, vmctx: *mut vm::Ctx) -> i64 = { let mut assembler = Assembler::new().unwrap(); @@ -207,7 +213,7 @@ pub struct X64ModuleCodeGenerator { functions: Vec, signatures: Option>>>, function_signatures: Option>>, - function_labels: Option>, + function_labels: Option)>>, assembler: Option, native_trampolines: Arc, func_import_count: usize, @@ -222,7 +228,7 @@ pub struct X64FunctionCode { begin_label: DynamicLabel, begin_offset: AssemblyOffset, assembler: Option, - function_labels: Option>, + function_labels: Option)>>, br_table_data: Option>>, returns: Vec, locals: Vec, @@ -233,13 +239,35 @@ pub struct X64FunctionCode { unreachable_depth: usize, } +enum FuncPtrInner {} +#[repr(transparent)] +struct FuncPtr(*const FuncPtrInner); +unsafe impl Send for FuncPtr {} +unsafe impl Sync for FuncPtr {} + pub struct X64ExecutionContext { code: ExecutableBuffer, functions: Vec, + signatures: Arc>>, + function_signatures: Arc>, + function_pointers: Vec, br_table_data: Vec>, func_import_count: usize, } +impl FuncResolver for X64ExecutionContext { + fn get( + &self, + _module: &ModuleInner, + _local_func_index: LocalFuncIndex, + ) -> Option> { + NonNull::new( + self.function_pointers[_local_func_index.index() as usize + self.func_import_count].0 + as *mut vm::Func, + ) + } +} + impl ProtectedCaller for X64ExecutionContext { fn call( &self, @@ -308,6 +336,8 @@ impl ProtectedCaller for X64ExecutionContext { }; //println!("MEMORY = {:?}", memory_base); + CURRENT_EXECUTION_CONTEXT.with(|x| x.borrow_mut().push(self)); + let ret = unsafe { CALL_WASM( param_buf.as_ptr(), @@ -317,6 +347,9 @@ impl ProtectedCaller for X64ExecutionContext { _vmctx, ) }; + + CURRENT_EXECUTION_CONTEXT.with(|x| x.borrow_mut().pop().unwrap()); + Ok(if let Some(ty) = return_ty { vec![Value::I64(ret)] } else { @@ -381,10 +414,14 @@ impl ModuleCodeGenerator for X64ModuleCode vec![], ), }; - let begin_label = *function_labels - .entry(self.functions.len() + self.func_import_count) - .or_insert_with(|| assembler.new_dynamic_label()); let begin_offset = assembler.offset(); + let begin_label_info = function_labels + .entry(self.functions.len() + self.func_import_count) + .or_insert_with(|| (assembler.new_dynamic_label(), None)); + + begin_label_info.1 = Some(begin_offset); + let begin_label = begin_label_info.0; + dynasm!( assembler ; => begin_label @@ -429,11 +466,56 @@ impl ModuleCodeGenerator for X64ModuleCode *entry = output.ptr(AssemblyOffset(*entry)) as usize; } } + + let function_labels = if let Some(x) = self.functions.last() { + x.function_labels.as_ref().unwrap() + } else { + self.function_labels.as_ref().unwrap() + }; + let mut out_labels: Vec = vec![]; + + for i in 0..function_labels.len() { + let (_, offset) = match function_labels.get(&i) { + Some(x) => x, + None => { + return Err(CodegenError { + message: "label not found", + }) + } + }; + let offset = match offset { + Some(x) => x, + None => { + return Err(CodegenError { + message: "offset is none", + }) + } + }; + out_labels.push(FuncPtr(output.ptr(*offset) as _)); + } + Ok(X64ExecutionContext { code: output, functions: self.functions, br_table_data: br_table_data, func_import_count: self.func_import_count, + signatures: match self.signatures { + Some(x) => x, + None => { + return Err(CodegenError { + message: "no signatures", + }) + } + }, + function_pointers: 
out_labels, + function_signatures: match self.function_signatures { + Some(x) => x, + None => { + return Err(CodegenError { + message: "no function signatures", + }) + } + }, }) } @@ -464,13 +546,15 @@ impl ModuleCodeGenerator for X64ModuleCode }; let id = labels.len(); + let offset = self.assembler.as_mut().unwrap().offset(); + let label = X64FunctionCode::emit_native_call_trampoline( self.assembler.as_mut().unwrap(), invoke_import, 0, id, ); - labels.insert(id, label); + labels.insert(id, (label, Some(offset))); self.func_import_count += 1; @@ -1092,6 +1176,7 @@ impl X64FunctionCode { stack_top: *mut u8, stack_base: *mut u8, vmctx: *mut vm::Ctx, + memory_base: *mut u8, ) -> u64, ctx1: A, ctx2: B, @@ -1114,6 +1199,7 @@ impl X64FunctionCode { ; mov rdx, rsp ; mov rcx, rbp ; mov r8, r14 // vmctx + ; mov r9, r15 // memory_base ; mov rax, QWORD (0xfffffffffffffff0u64 as i64) ; and rsp, rax ; mov rax, QWORD (target as i64) @@ -2417,12 +2503,13 @@ impl FunctionCodeGenerator for X64FunctionCode { } Operator::Call { function_index } => { let function_index = function_index as usize; - let label = *self + let label = self .function_labels .as_mut() .unwrap() .entry(function_index) - .or_insert_with(|| assembler.new_dynamic_label()); + .or_insert_with(|| (assembler.new_dynamic_label(), None)) + .0; let sig_index = match self.function_signatures.get(FuncIndex::new(function_index)) { Some(x) => *x, None => { @@ -2451,6 +2538,57 @@ impl FunctionCodeGenerator for X64FunctionCode { &return_types, )?; } + Operator::CallIndirect { index, table_index } => { + if table_index != 0 { + return Err(CodegenError { + message: "only one table is supported", + }); + } + if module_info.tables.len() != 1 { + return Err(CodegenError { + message: "no tables", + }); + } + let sig_index = SigIndex::new(index as usize); + let sig = match self.signatures.get(sig_index) { + Some(x) => x, + None => { + return Err(CodegenError { + message: "signature does not exist", + }) + } + }; + let mut param_types: Vec = + sig.params().iter().cloned().map(type_to_wp_type).collect(); + let return_types: Vec = + sig.returns().iter().cloned().map(type_to_wp_type).collect(); + param_types.push(WpType::I32); // element index + + dynasm!( + assembler + ; jmp >after_trampoline + ); + + let trampoline_label = Self::emit_native_call_trampoline( + assembler, + call_indirect, + 0usize, + index as usize, + ); + + dynasm!( + assembler + ; after_trampoline: + ); + + Self::emit_call_raw( + assembler, + &mut self.value_stack, + trampoline_label, + ¶m_types, + &return_types, + )?; + } Operator::End => { if self.control_stack.as_ref().unwrap().frames.len() == 1 { let frame = self.control_stack.as_mut().unwrap().frames.pop().unwrap(); @@ -2968,6 +3106,7 @@ unsafe extern "C" fn do_trap( stack_top: *mut u8, stack_base: *mut u8, vmctx: *mut vm::Ctx, + memory_base: *mut u8, ) -> u64 { panic!("TRAP CODE: {:?}", ctx2); } @@ -2978,9 +3117,54 @@ unsafe extern "C" fn invoke_import( stack_top: *mut u8, stack_base: *mut u8, vmctx: *mut vm::Ctx, + memory_base: *mut u8, ) -> u64 { let vmctx: &mut vm::Ctx = &mut *vmctx; let import = (*vmctx.imported_funcs.offset(import_id as isize)).func; CONSTRUCT_STACK_AND_CALL_NATIVE(stack_top, stack_base, vmctx, import) } + +unsafe extern "C" fn call_indirect( + _unused: usize, + sig_index: usize, + mut stack_top: *mut u8, + stack_base: *mut u8, + vmctx: *mut vm::Ctx, + memory_base: *mut u8, +) -> u64 { + let elem_index = *(stack_top as *mut u32) as usize; + stack_top = stack_top.offset(8); + assert!(stack_top as usize <= stack_base 
as usize); + + let table: &LocalTable = &*(*(*vmctx).tables); + if elem_index >= table.count as usize { + panic!("element index out of bounds"); + } + let func_index = *(table.base as *mut u32).offset(elem_index as isize) as usize; + let ctx: &X64ExecutionContext = + &*CURRENT_EXECUTION_CONTEXT.with(|x| *x.borrow().last().unwrap()); + + println!( + "SIG INDEX = {}, FUNC INDEX = {}, ELEM INDEX = {}", + sig_index, func_index, elem_index + ); + + // TODO: Fix table reading. Hardcoding func index = 1 for debugging here. + let func_index = 1usize; + + if ctx.signatures[SigIndex::new(sig_index)] + != ctx.signatures[ctx.function_signatures[FuncIndex::new(func_index)]] + { + panic!("signature mismatch"); + } + + let func = ctx.function_pointers[func_index].0; + CALL_WASM( + stack_top, + stack_base as usize - stack_top as usize, + func as _, + memory_base, + vmctx, + ) as u64 +} diff --git a/lib/dynasm-backend/src/lib.rs b/lib/dynasm-backend/src/lib.rs index a17e6e48b5e..67fcd52a74a 100644 --- a/lib/dynasm-backend/src/lib.rs +++ b/lib/dynasm-backend/src/lib.rs @@ -39,8 +39,7 @@ impl FuncResolver for Placeholder { _module: &ModuleInner, _local_func_index: LocalFuncIndex, ) -> Option> { - panic!(); - None + NonNull::new(0x3f3f3f3f3f3f3f3fusize as *mut vm::Func) } } From c6dfbcd90d82b31dc2380896ae400dd3232b78d5 Mon Sep 17 00:00:00 2001 From: losfair Date: Sat, 9 Mar 2019 02:58:10 +0800 Subject: [PATCH 052/100] Add tests. --- examples/single_pass_tests/call_indirect.wat | 12 +++++++++ examples/single_pass_tests/global.wat | 26 ++++++++++++++++++++ 2 files changed, 38 insertions(+) create mode 100644 examples/single_pass_tests/call_indirect.wat create mode 100644 examples/single_pass_tests/global.wat diff --git a/examples/single_pass_tests/call_indirect.wat b/examples/single_pass_tests/call_indirect.wat new file mode 100644 index 00000000000..2907b568320 --- /dev/null +++ b/examples/single_pass_tests/call_indirect.wat @@ -0,0 +1,12 @@ +(module + (type $binop (func (param i32 i32) (result i32))) + (table 1 100 anyfunc) + (elem (i32.const 10) $add) + + (func $main (export "main") (result i32) + (call_indirect (type $binop) (i32.const 42) (i32.const 1) (i32.const 9)) + ) + (func $add (param i32) (param i32) (result i32) + (i32.add (get_local 0) (get_local 1)) + ) +) diff --git a/examples/single_pass_tests/global.wat b/examples/single_pass_tests/global.wat new file mode 100644 index 00000000000..f06e15302e2 --- /dev/null +++ b/examples/single_pass_tests/global.wat @@ -0,0 +1,26 @@ +(module + (global $g1 (mut i32) (i32.const 0)) + (global $g2 (mut i32) (i32.const 99)) + (func $main (export "main") + (if (i32.eq (get_global $g1) (i32.const 0)) + (then) + (else unreachable) + ) + (if (i32.eq (get_global $g2) (i32.const 99)) + (then) + (else unreachable) + ) + + (set_global $g1 (i32.add (get_global $g1) (i32.const 1))) + (set_global $g2 (i32.sub (get_global $g2) (i32.const 1))) + + (if (i32.eq (get_global $g1) (i32.const 1)) + (then) + (else unreachable) + ) + (if (i32.eq (get_global $g2) (i32.const 98)) + (then) + (else unreachable) + ) + ) +) From c5ef0a96e90d64150247096e6b16ba497dd93551 Mon Sep 17 00:00:00 2001 From: losfair Date: Sat, 9 Mar 2019 02:58:37 +0800 Subject: [PATCH 053/100] Update vm::Ctx. 
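These fields appear to become public because the backend crate reads them directly, for example via r14 => vm::Ctx.globals in the earlier global patches, which relies on addressing fields at fixed byte offsets from the context pointer. A minimal illustration of that underlying assumption, using a stand-in #[repr(C)] struct rather than the real vm::Ctx:

    // Stand-in type only: with #[repr(C)], field order and offsets are fixed, so
    // machine code can address a field as base_pointer + constant_offset.
    #[repr(C)]
    struct CtxModel {
        memories: *mut u8,
        tables: *mut u8,
        globals: *mut u8,
    }

    fn main() {
        let globals_offset = std::mem::offset_of!(CtxModel, globals);
        // On a 64-bit target the first two pointer fields occupy 16 bytes.
        assert_eq!(globals_offset, 16);
        println!("globals lives at byte offset {globals_offset}");
    }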
--- lib/runtime-core/src/vm.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/lib/runtime-core/src/vm.rs b/lib/runtime-core/src/vm.rs index 8cb9f098647..ed8c0eeda88 100644 --- a/lib/runtime-core/src/vm.rs +++ b/lib/runtime-core/src/vm.rs @@ -14,25 +14,25 @@ use std::{ffi::c_void, mem, ptr}; #[repr(C)] pub struct Ctx { /// A pointer to an array of locally-defined memories, indexed by `MemoryIndex`. - pub(crate) memories: *mut *mut LocalMemory, + pub memories: *mut *mut LocalMemory, /// A pointer to an array of locally-defined tables, indexed by `TableIndex`. - pub(crate) tables: *mut *mut LocalTable, + pub tables: *mut *mut LocalTable, /// A pointer to an array of locally-defined globals, indexed by `GlobalIndex`. - pub(crate) globals: *mut *mut LocalGlobal, + pub globals: *mut *mut LocalGlobal, /// A pointer to an array of imported memories, indexed by `MemoryIndex, pub(crate) imported_memories: *mut *mut LocalMemory, /// A pointer to an array of imported tables, indexed by `TableIndex`. - pub(crate) imported_tables: *mut *mut LocalTable, + pub imported_tables: *mut *mut LocalTable, /// A pointer to an array of imported globals, indexed by `GlobalIndex`. - pub(crate) imported_globals: *mut *mut LocalGlobal, + pub imported_globals: *mut *mut LocalGlobal, /// A pointer to an array of imported functions, indexed by `FuncIndex`. - pub(crate) imported_funcs: *mut ImportedFunc, + pub imported_funcs: *mut ImportedFunc, local_backing: *mut LocalBacking, import_backing: *mut ImportBacking, From 683cb2090c4aae8559e7c0ddc25fc59688ffa8ef Mon Sep 17 00:00:00 2001 From: losfair Date: Tue, 12 Mar 2019 11:31:55 +0800 Subject: [PATCH 054/100] Fix call_indirect. --- examples/single_pass_tests/call_indirect.wat | 2 +- lib/dynasm-backend/src/codegen_x64.rs | 17 ++++++++--------- lib/runtime-core/src/backing.rs | 9 +++++++-- lib/runtime-core/src/instance.rs | 2 +- lib/runtime-core/src/table/anyfunc.rs | 2 ++ lib/runtime-core/src/vm.rs | 4 +++- 6 files changed, 22 insertions(+), 14 deletions(-) diff --git a/examples/single_pass_tests/call_indirect.wat b/examples/single_pass_tests/call_indirect.wat index 2907b568320..9b33407a99e 100644 --- a/examples/single_pass_tests/call_indirect.wat +++ b/examples/single_pass_tests/call_indirect.wat @@ -4,7 +4,7 @@ (elem (i32.const 10) $add) (func $main (export "main") (result i32) - (call_indirect (type $binop) (i32.const 42) (i32.const 1) (i32.const 9)) + (call_indirect (type $binop) (i32.const 42) (i32.const 1) (i32.const 10)) ) (func $add (param i32) (param i32) (result i32) (i32.add (get_local 0) (get_local 1)) diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index 175a3808c24..6c98adebb36 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -3141,25 +3141,24 @@ unsafe extern "C" fn call_indirect( if elem_index >= table.count as usize { panic!("element index out of bounds"); } - let func_index = *(table.base as *mut u32).offset(elem_index as isize) as usize; + let anyfunc = &*(table.base as *mut vm::Anyfunc).offset(elem_index as isize); let ctx: &X64ExecutionContext = &*CURRENT_EXECUTION_CONTEXT.with(|x| *x.borrow().last().unwrap()); - println!( - "SIG INDEX = {}, FUNC INDEX = {}, ELEM INDEX = {}", - sig_index, func_index, elem_index - ); + let func_index = anyfunc.func_index.unwrap(); - // TODO: Fix table reading. Hardcoding func index = 1 for debugging here. 
- let func_index = 1usize; + /*println!( + "SIG INDEX = {}, FUNC INDEX = {:?}, ELEM INDEX = {}", + sig_index, func_index, elem_index + );*/ if ctx.signatures[SigIndex::new(sig_index)] - != ctx.signatures[ctx.function_signatures[FuncIndex::new(func_index)]] + != ctx.signatures[ctx.function_signatures[func_index]] { panic!("signature mismatch"); } - let func = ctx.function_pointers[func_index].0; + let func = ctx.function_pointers[func_index.index() as usize].0; CALL_WASM( stack_top, stack_base as usize - stack_top as usize, diff --git a/lib/runtime-core/src/backing.rs b/lib/runtime-core/src/backing.rs index d81e3306552..d5abca610ba 100644 --- a/lib/runtime-core/src/backing.rs +++ b/lib/runtime-core/src/backing.rs @@ -194,7 +194,7 @@ impl LocalBacking { } }; - elements[init_base + i] = vm::Anyfunc { func, ctx, sig_id }; + elements[init_base + i] = vm::Anyfunc { func, ctx, sig_id, func_index: Some(func_index) }; } }); } @@ -232,7 +232,12 @@ impl LocalBacking { } }; - elements[init_base + i] = vm::Anyfunc { func, ctx, sig_id }; + elements[init_base + i] = vm::Anyfunc { + func, + ctx, + sig_id, + func_index: Some(func_index), + }; } }); } diff --git a/lib/runtime-core/src/instance.rs b/lib/runtime-core/src/instance.rs index a8f36325aad..055bd53317f 100644 --- a/lib/runtime-core/src/instance.rs +++ b/lib/runtime-core/src/instance.rs @@ -428,7 +428,7 @@ pub struct DynFunc<'a> { pub(crate) signature: Arc, module: &'a ModuleInner, pub(crate) instance_inner: &'a InstanceInner, - func_index: FuncIndex, + pub func_index: FuncIndex, } impl<'a> DynFunc<'a> { diff --git a/lib/runtime-core/src/table/anyfunc.rs b/lib/runtime-core/src/table/anyfunc.rs index 161e2609755..8e48a00d123 100644 --- a/lib/runtime-core/src/table/anyfunc.rs +++ b/lib/runtime-core/src/table/anyfunc.rs @@ -110,6 +110,7 @@ impl AnyfuncTable { func: ptr, ctx: ptr::null_mut(), sig_id, + func_index: None, } } AnyfuncInner::Managed(ref func) => { @@ -120,6 +121,7 @@ impl AnyfuncTable { func: func.raw(), ctx: func.instance_inner.vmctx, sig_id, + func_index: Some(func.func_index), } } }; diff --git a/lib/runtime-core/src/vm.rs b/lib/runtime-core/src/vm.rs index ed8c0eeda88..94090c6751e 100644 --- a/lib/runtime-core/src/vm.rs +++ b/lib/runtime-core/src/vm.rs @@ -3,7 +3,7 @@ use crate::{ memory::Memory, module::ModuleInner, structures::TypedIndex, - types::{LocalOrImport, MemoryIndex}, + types::{FuncIndex, LocalOrImport, MemoryIndex}, }; use std::{ffi::c_void, mem, ptr}; @@ -283,6 +283,7 @@ pub struct Anyfunc { pub func: *const Func, pub ctx: *mut Ctx, pub sig_id: SigId, + pub func_index: Option, } impl Anyfunc { @@ -291,6 +292,7 @@ impl Anyfunc { func: ptr::null(), ctx: ptr::null_mut(), sig_id: SigId(u32::max_value()), + func_index: None, } } From c3b0bd76a1edb1ab91b29ceab1eaa4797600d811 Mon Sep 17 00:00:00 2001 From: losfair Date: Tue, 12 Mar 2019 11:34:45 +0800 Subject: [PATCH 055/100] Validate return values in indirect call test. 
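
The table now carries both $add (slot 10) and $sub (slot 5), so a wrong element lookup or a clobbered return value makes `main` hit `unreachable`. A hedged sketch of how such a module can be driven, mirroring the spectest and example harnesses in this series (wat2wasm comes from the wabt crate already used by the tests; SinglePassCompiler is this backend's compiler entry point; error handling is elided):

    use wasmer_dynasm_backend::SinglePassCompiler;
    use wasmer_runtime_core::{compile_with, imports};

    fn run(wat: &str) {
        let wasm = wabt::wat2wasm(wat).expect("wat2wasm");
        let module = compile_with(&wasm, &SinglePassCompiler::new()).expect("compile");
        let instance = module.instantiate(&imports! {}).expect("instantiate");
        // `main` traps via `unreachable` if either indirect call result is wrong.
        instance.call("main", &[]).expect("main");
    }
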
--- examples/single_pass_tests/call_indirect.wat | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/examples/single_pass_tests/call_indirect.wat b/examples/single_pass_tests/call_indirect.wat index 9b33407a99e..019f045e8d4 100644 --- a/examples/single_pass_tests/call_indirect.wat +++ b/examples/single_pass_tests/call_indirect.wat @@ -1,12 +1,25 @@ (module (type $binop (func (param i32 i32) (result i32))) (table 1 100 anyfunc) + (elem (i32.const 5) $sub) (elem (i32.const 10) $add) - (func $main (export "main") (result i32) - (call_indirect (type $binop) (i32.const 42) (i32.const 1) (i32.const 10)) + (func $main (export "main") + (if (i32.eq (call_indirect (type $binop) (i32.const 42) (i32.const 1) (i32.const 10)) (i32.const 43)) + (then) + (else unreachable) + ) + (if (i32.eq (call_indirect (type $binop) (i32.const 42) (i32.const 1) (i32.const 5)) (i32.const 41)) + (then) + (else unreachable) + ) ) + (func $add (param i32) (param i32) (result i32) (i32.add (get_local 0) (get_local 1)) ) + + (func $sub (param i32) (param i32) (result i32) + (i32.sub (get_local 0) (get_local 1)) + ) ) From 1fc7b313099fe01533a72f00a7e5e7b472caf719 Mon Sep 17 00:00:00 2001 From: Brandon Fish Date: Tue, 12 Mar 2019 19:39:10 -0500 Subject: [PATCH 056/100] Add missing trait items to DynasmCompiler so that cargo build --all works --- lib/dynasm-backend/src/lib.rs | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/lib/dynasm-backend/src/lib.rs b/lib/dynasm-backend/src/lib.rs index 67fcd52a74a..479eae60c24 100644 --- a/lib/dynasm-backend/src/lib.rs +++ b/lib/dynasm-backend/src/lib.rs @@ -20,7 +20,8 @@ use crate::codegen::{CodegenError, ModuleCodeGenerator}; use crate::parse::LoadError; use std::ptr::NonNull; use wasmer_runtime_core::{ - backend::{Backend, Compiler, FuncResolver, ProtectedCaller, Token, UserTrapper}, + backend::{sys::Memory, Backend, Compiler, FuncResolver, ProtectedCaller, Token, UserTrapper}, + cache::{Cache, Error as CacheError}, error::{CompileError, CompileResult, RuntimeResult}, module::{ModuleInfo, ModuleInner, StringTable}, structures::{Map, TypedIndex}, @@ -56,6 +57,18 @@ impl Compiler for SinglePassCompiler { info: info, }) } + + unsafe fn from_cache(&self, cache: Cache, _: Token) -> Result { + unimplemented!() + } + + fn compile_to_backend_cache_data( + &self, + wasm: &[u8], + _: Token, + ) -> CompileResult<(Box, Vec, Memory)> { + unimplemented!() + } } impl From for CompileError { From a5bab8cdf668064e8a0a844e45653471a77dbbb0 Mon Sep 17 00:00:00 2001 From: Brandon Fish Date: Tue, 12 Mar 2019 20:59:10 -0500 Subject: [PATCH 057/100] Updates to compile dynasm after merge from master --- lib/dynasm-backend/src/codegen.rs | 2 +- lib/dynasm-backend/src/codegen_x64.rs | 22 ++++++++++---------- lib/dynasm-backend/src/lib.rs | 30 ++++++++++++++++----------- lib/dynasm-backend/src/parse.rs | 2 +- 4 files changed, 31 insertions(+), 25 deletions(-) diff --git a/lib/dynasm-backend/src/codegen.rs b/lib/dynasm-backend/src/codegen.rs index b9cbec184be..f9bee801dad 100644 --- a/lib/dynasm-backend/src/codegen.rs +++ b/lib/dynasm-backend/src/codegen.rs @@ -13,7 +13,7 @@ pub trait ModuleCodeGenerator { fn finalize(self) -> Result; fn feed_signatures( &mut self, - signatures: Map>, + signatures: Map, ) -> Result<(), CodegenError>; fn feed_function_signatures( &mut self, diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index 6c98adebb36..992e0a0da23 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ 
b/lib/dynasm-backend/src/codegen_x64.rs @@ -9,7 +9,7 @@ use dynasmrt::{ use std::cell::RefCell; use std::ptr::NonNull; use std::sync::Mutex; -use std::{collections::HashMap, sync::Arc}; +use std::{any::Any, collections::HashMap, sync::Arc}; use wasmer_runtime_core::{ backend::{Backend, Compiler, FuncResolver, ProtectedCaller, Token, UserTrapper}, error::{CompileError, CompileResult, RuntimeError, RuntimeResult}, @@ -211,7 +211,7 @@ pub struct NativeTrampolines { pub struct X64ModuleCodeGenerator { functions: Vec, - signatures: Option>>>, + signatures: Option>>, function_signatures: Option>>, function_labels: Option)>>, assembler: Option, @@ -220,7 +220,7 @@ pub struct X64ModuleCodeGenerator { } pub struct X64FunctionCode { - signatures: Arc>>, + signatures: Arc>, function_signatures: Arc>, native_trampolines: Arc, @@ -248,7 +248,7 @@ unsafe impl Sync for FuncPtr {} pub struct X64ExecutionContext { code: ExecutableBuffer, functions: Vec, - signatures: Arc>>, + signatures: Arc>, function_signatures: Arc>, function_pointers: Vec, br_table_data: Vec>, @@ -283,7 +283,7 @@ impl ProtectedCaller for X64ExecutionContext { let return_ty = self.functions[index].returns.last().cloned(); if self.functions[index].num_params != _params.len() { - return Err(RuntimeError::User { + return Err(RuntimeError::Trap { msg: "param count mismatch".into(), }); } @@ -306,7 +306,7 @@ impl ProtectedCaller for X64ExecutionContext { Value::I32(x) => LittleEndian::write_u32(buf, x as u32), Value::F32(x) => LittleEndian::write_u32(buf, f32::to_bits(x)), _ => { - return Err(RuntimeError::User { + return Err(RuntimeError::Trap { msg: "signature mismatch".into(), }) } @@ -316,7 +316,7 @@ impl ProtectedCaller for X64ExecutionContext { Value::I64(x) => LittleEndian::write_u64(buf, x as u64), Value::F64(x) => LittleEndian::write_u64(buf, f64::to_bits(x)), _ => { - return Err(RuntimeError::User { + return Err(RuntimeError::Trap { msg: "signature mismatch".into(), }) } @@ -326,7 +326,7 @@ impl ProtectedCaller for X64ExecutionContext { let memory_base: *mut u8 = if _module.info.memories.len() > 0 { if _module.info.memories.len() != 1 { - return Err(RuntimeError::User { + return Err(RuntimeError::Trap { msg: "only one linear memory is supported".into(), }); } @@ -361,8 +361,8 @@ impl ProtectedCaller for X64ExecutionContext { pub struct Trapper; impl UserTrapper for Trapper { - unsafe fn do_early_trap(&self, msg: String) -> ! { - panic!("{}", msg); + unsafe fn do_early_trap(&self, data: Box) -> ! 
{ + panic!("do_early_trap"); } } @@ -521,7 +521,7 @@ impl ModuleCodeGenerator for X64ModuleCode fn feed_signatures( &mut self, - signatures: Map>, + signatures: Map, ) -> Result<(), CodegenError> { self.signatures = Some(Arc::new(signatures)); Ok(()) diff --git a/lib/dynasm-backend/src/lib.rs b/lib/dynasm-backend/src/lib.rs index 479eae60c24..00602782d79 100644 --- a/lib/dynasm-backend/src/lib.rs +++ b/lib/dynasm-backend/src/lib.rs @@ -20,8 +20,8 @@ use crate::codegen::{CodegenError, ModuleCodeGenerator}; use crate::parse::LoadError; use std::ptr::NonNull; use wasmer_runtime_core::{ - backend::{sys::Memory, Backend, Compiler, FuncResolver, ProtectedCaller, Token, UserTrapper}, - cache::{Cache, Error as CacheError}, + backend::{sys::Memory, Backend, CacheGen, Compiler, FuncResolver, ProtectedCaller, Token, UserTrapper}, + cache::{Artifact, Error as CacheError}, error::{CompileError, CompileResult, RuntimeResult}, module::{ModuleInfo, ModuleInner, StringTable}, structures::{Map, TypedIndex}, @@ -33,6 +33,14 @@ use wasmer_runtime_core::{ }; struct Placeholder; +impl CacheGen for Placeholder { + fn generate_cache( + &self, + module: &ModuleInner, + ) -> Result<(Box, Box<[u8]>, Memory), CacheError> { + unimplemented!() + } +} impl FuncResolver for Placeholder { fn get( @@ -45,6 +53,11 @@ impl FuncResolver for Placeholder { } pub struct SinglePassCompiler {} +impl SinglePassCompiler { + pub fn new() -> Self { + Self {} + } +} impl Compiler for SinglePassCompiler { fn compile(&self, wasm: &[u8], _: Token) -> CompileResult { @@ -52,22 +65,15 @@ impl Compiler for SinglePassCompiler { let info = parse::read_module(wasm, Backend::Dynasm, &mut mcg)?; let ec = mcg.finalize()?; Ok(ModuleInner { + cache_gen: Box::new(Placeholder), func_resolver: Box::new(Placeholder), protected_caller: Box::new(ec), info: info, }) } - unsafe fn from_cache(&self, cache: Cache, _: Token) -> Result { - unimplemented!() - } - - fn compile_to_backend_cache_data( - &self, - wasm: &[u8], - _: Token, - ) -> CompileResult<(Box, Vec, Memory)> { - unimplemented!() + unsafe fn from_cache(&self, _artifact: Artifact, _: Token) -> Result { + unimplemented!("the dynasm backend doesn't support caching yet") } } diff --git a/lib/dynasm-backend/src/parse.rs b/lib/dynasm-backend/src/parse.rs index ddd16ea5024..e4909a96913 100644 --- a/lib/dynasm-backend/src/parse.rs +++ b/lib/dynasm-backend/src/parse.rs @@ -87,7 +87,7 @@ pub fn read_module< for ty in type_reader { let ty = ty?; - info.signatures.push(Arc::new(func_type_to_func_sig(ty)?)); + info.signatures.push(func_type_to_func_sig(ty)?); } mcg.feed_signatures(info.signatures.clone())?; From 68181acc74e6067f8ac4ccd284cc76736e3e502e Mon Sep 17 00:00:00 2001 From: Brandon Fish Date: Tue, 12 Mar 2019 21:14:50 -0500 Subject: [PATCH 058/100] Add dynasm to spectests --- Cargo.lock | 1 + lib/spectests/Cargo.toml | 4 +++- lib/spectests/build/spectests.rs | 8 +++++++- lib/spectests/examples/simple/main.rs | 8 +++++++- 4 files changed, 18 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 165eb0044a8..9a53b6baffa 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1467,6 +1467,7 @@ version = "0.2.0" dependencies = [ "wabt 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)", "wasmer-clif-backend 0.2.0", + "wasmer-dynasm-backend 0.1.0", "wasmer-llvm-backend 0.1.0", "wasmer-runtime-core 0.2.1", ] diff --git a/lib/spectests/Cargo.toml b/lib/spectests/Cargo.toml index fea4560c5bb..6881e29efed 100644 --- a/lib/spectests/Cargo.toml +++ b/lib/spectests/Cargo.toml @@ -16,6 +16,7 @@ 
wabt = "0.7.2" [dev-dependencies] wasmer-clif-backend = { path = "../clif-backend", version = "0.2.0" } +wasmer-dynasm-backend = { path = "../dynasm-backend", version = "0.1.0" } wabt = "0.7.2" [target.'cfg(not(windows))'.dev-dependencies] @@ -25,4 +26,5 @@ wasmer-llvm-backend = { path = "../llvm-backend", version = "0.1.0" } default = ["fast-tests"] fast-tests = [] clif = [] -llvm = [] \ No newline at end of file +llvm = [] +dynasm = [] \ No newline at end of file diff --git a/lib/spectests/build/spectests.rs b/lib/spectests/build/spectests.rs index c1d28ca2907..a13faefd18a 100644 --- a/lib/spectests/build/spectests.rs +++ b/lib/spectests/build/spectests.rs @@ -107,7 +107,13 @@ fn get_compiler() -> impl Compiler { LLVMCompiler::new() } -#[cfg(not(any(feature = "llvm", feature = "clif")))] +#[cfg(feature = "dynasm")] +fn get_compiler() -> impl Compiler { + use wasmer_dynasm_backend::SinglePassCompiler; + SinglePassCompiler::new() +} + +#[cfg(not(any(feature = "llvm", feature = "clif", feature = "dynasm")))] fn get_compiler() -> impl Compiler { panic!("compiler not specified, activate a compiler via features"); use wasmer_clif_backend::CraneliftCompiler; diff --git a/lib/spectests/examples/simple/main.rs b/lib/spectests/examples/simple/main.rs index 595bb3680f5..c61cdb18fc7 100644 --- a/lib/spectests/examples/simple/main.rs +++ b/lib/spectests/examples/simple/main.rs @@ -22,7 +22,13 @@ fn get_compiler() -> impl Compiler { LLVMCompiler::new() } -#[cfg(not(any(feature = "llvm", feature = "clif")))] +#[cfg(feature = "dynasm")] +fn get_compiler() -> impl Compiler { + use wasmer_dynasm_backend::SinglePassCompiler; + SinglePassCompiler::new() +} + +#[cfg(not(any(feature = "llvm", feature = "clif", feature = "dynasm")))] fn get_compiler() -> impl Compiler { panic!("compiler not specified, activate a compiler via features"); use wasmer_clif_backend::CraneliftCompiler; From d4ded2cf07912d63ddda5414e512c6e7f9498a27 Mon Sep 17 00:00:00 2001 From: losfair Date: Wed, 13 Mar 2019 18:23:50 +0800 Subject: [PATCH 059/100] Fix bugs found when running spectests. --- lib/dynasm-backend/src/codegen_x64.rs | 196 +++++++++++++++++++++++++- 1 file changed, 191 insertions(+), 5 deletions(-) diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index 992e0a0da23..0f6d4939d75 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -50,8 +50,8 @@ lazy_static! 
{ ; _loop: ; cmp rsi, 0 ; je >_loop_end - ; mov eax, [rdi] - ; mov [r8], eax + ; mov rax, [rdi] + ; mov [r8], rax ; add r8, 8 ; add rdi, 8 ; sub rsi, 8 @@ -351,7 +351,13 @@ impl ProtectedCaller for X64ExecutionContext { CURRENT_EXECUTION_CONTEXT.with(|x| x.borrow_mut().pop().unwrap()); Ok(if let Some(ty) = return_ty { - vec![Value::I64(ret)] + vec![match ty { + WpType::I32 => Value::I32(ret as i32), + WpType::I64 => Value::I64(ret), + WpType::F32 => Value::F32(f32::from_bits(ret as i32 as u32)), + WpType::F64 => Value::F64(f64::from_bits(ret as u64)), + _ => unreachable!(), + }] } else { vec![] }) @@ -704,6 +710,33 @@ impl X64FunctionCode { Self::emit_binop(assembler, value_stack, f, WpType::I64, WpType::I64) } + fn emit_shift( + assembler: &mut Assembler, + value_stack: &ValueStack, + left: Register, + right: Register, + f: F, + ) { + let rcx_used = Register::RCX.is_used(value_stack); + if(rcx_used) { + dynasm!( + assembler + ; push rcx + ); + } + dynasm!( + assembler + ; mov rcx, Rq(right as u8) + ); + f(assembler, left); + if(rcx_used) { + dynasm!( + assembler + ; pop rcx + ); + } + } + fn emit_div_i32( assembler: &mut Assembler, value_stack: &ValueStack, @@ -1523,7 +1556,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Operator::Block { .. } | Operator::Loop { .. } | Operator::If { .. } => { self.unreachable_depth += 1; } - Operator::End => { + Operator::End | Operator::Else => { self.unreachable_depth -= 1; } _ => {} @@ -1943,6 +1976,76 @@ impl FunctionCodeGenerator for X64FunctionCode { }, )?; } + Operator::I32Shl => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_shift(assembler, value_stack, left, right, |assembler, left| { + dynasm!( + assembler + ; shl Rd(left as u8), cl + ) + }); + } + )?; + } + Operator::I32ShrU => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_shift(assembler, value_stack, left, right, |assembler, left| { + dynasm!( + assembler + ; shr Rd(left as u8), cl + ) + }); + } + )?; + } + Operator::I32ShrS => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_shift(assembler, value_stack, left, right, |assembler, left| { + dynasm!( + assembler + ; sar Rd(left as u8), cl + ) + }); + } + )?; + } + Operator::I32Rotl => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_shift(assembler, value_stack, left, right, |assembler, left| { + dynasm!( + assembler + ; rol Rd(left as u8), cl + ) + }); + } + )?; + } + Operator::I32Rotr => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_shift(assembler, value_stack, left, right, |assembler, left| { + dynasm!( + assembler + ; ror Rd(left as u8), cl + ) + }); + } + )?; + } // Comparison operators. // https://en.wikibooks.org/wiki/X86_Assembly/Control_Flow // TODO: Is reading flag register directly faster? 
@@ -2297,6 +2400,76 @@ impl FunctionCodeGenerator for X64FunctionCode { }, )?; } + Operator::I64Shl => { + Self::emit_binop_i64( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_shift(assembler, value_stack, left, right, |assembler, left| { + dynasm!( + assembler + ; shl Rq(left as u8), cl + ) + }); + } + )?; + } + Operator::I64ShrU => { + Self::emit_binop_i64( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_shift(assembler, value_stack, left, right, |assembler, left| { + dynasm!( + assembler + ; shr Rq(left as u8), cl + ) + }); + } + )?; + } + Operator::I64ShrS => { + Self::emit_binop_i64( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_shift(assembler, value_stack, left, right, |assembler, left| { + dynasm!( + assembler + ; sar Rq(left as u8), cl + ) + }); + } + )?; + } + Operator::I64Rotl => { + Self::emit_binop_i64( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_shift(assembler, value_stack, left, right, |assembler, left| { + dynasm!( + assembler + ; rol Rq(left as u8), cl + ) + }); + } + )?; + } + Operator::I64Rotr => { + Self::emit_binop_i64( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_shift(assembler, value_stack, left, right, |assembler, left| { + dynasm!( + assembler + ; ror Rq(left as u8), cl + ) + }); + } + )?; + } // Comparison operators. // https://en.wikibooks.org/wiki/X86_Assembly/Control_Flow // TODO: Is reading flag register directly faster? @@ -2484,6 +2657,7 @@ impl FunctionCodeGenerator for X64FunctionCode { }); } Operator::Unreachable => { + /* Self::emit_call_raw( assembler, &mut self.value_stack, @@ -2491,6 +2665,11 @@ impl FunctionCodeGenerator for X64FunctionCode { &[], &[], )?; + */ + dynasm!( + assembler + ; ud2 + ); self.unreachable_depth = 1; } Operator::Drop => { @@ -2595,6 +2774,8 @@ impl FunctionCodeGenerator for X64FunctionCode { if !was_unreachable { Self::emit_leave_frame(assembler, &frame, &mut self.value_stack, false)?; + } else { + self.value_stack.reset_depth(0); } dynasm!( @@ -3041,7 +3222,10 @@ impl FunctionCodeGenerator for X64FunctionCode { WpType::I64, )?; } - _ => unimplemented!(), + Operator::Nop => {} + _ => { + panic!("{:?}", op); + }, } Ok(()) } @@ -3122,6 +3306,8 @@ unsafe extern "C" fn invoke_import( let vmctx: &mut vm::Ctx = &mut *vmctx; let import = (*vmctx.imported_funcs.offset(import_id as isize)).func; + return 0; // TODO: Fix this. + CONSTRUCT_STACK_AND_CALL_NATIVE(stack_top, stack_base, vmctx, import) } From 179bbf9e41b919b0f922322ee98f557ec38c07e0 Mon Sep 17 00:00:00 2001 From: losfair Date: Thu, 14 Mar 2019 09:15:25 +0800 Subject: [PATCH 060/100] Fix integer division. 
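
`idiv` divides the double-width pair edx:eax (rdx:rax), so the dividend has to be sign-extended with `cdq`/`cqo`; zeroing edx/rdx is only correct for the unsigned `div` path. A small Rust reference showing the difference for i32.div_s (sketch of the intended semantics, not generated code):

    fn main() {
        let a: i32 = -7;
        // What `mov edx, 0` fed to idiv: a zero-extended dividend.
        let zero_extended = a as u32 as u64;          // 0x0000_0000_FFFF_FFF9
        // What `cdq` feeds to idiv: a sign-extended dividend.
        let sign_extended = a as i64;                 // -7
        assert_eq!(zero_extended / 2, 2_147_483_644); // wrong quotient for -7 / 2
        assert_eq!(sign_extended / 2, -3);            // expected i32.div_s result
    }
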
--- lib/dynasm-backend/src/codegen_x64.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index 0f6d4939d75..21720ead429 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -759,16 +759,17 @@ impl X64FunctionCode { ; push r15 ; mov r15d, Rd(right as u8) ; mov eax, Rd(left as u8) - ; mov edx, 0 ); if signed { dynasm!( assembler + ; cdq ; idiv r15d ); } else { dynasm!( assembler + ; xor edx, edx ; div r15d ); } @@ -808,16 +809,17 @@ impl X64FunctionCode { ; push r15 ; mov r15, Rq(right as u8) ; mov rax, Rq(left as u8) - ; mov rdx, 0 ); if signed { dynasm!( assembler + ; cqo ; idiv r15 ); } else { dynasm!( assembler + ; xor rdx, rdx ; div r15 ); } From e5d67c9705cef1bdd32d2059197d81ec8747664c Mon Sep 17 00:00:00 2001 From: Brandon Fish Date: Wed, 13 Mar 2019 20:16:07 -0500 Subject: [PATCH 061/100] Add dynasm feature to emscripten tests --- Cargo.lock | 1 + lib/emscripten/Cargo.toml | 4 +++- lib/emscripten/src/utils.rs | 8 +++++++- lib/emscripten/tests/emtests/_common.rs | 8 +++++++- 4 files changed, 18 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9a53b6baffa..e6421e1c05e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1387,6 +1387,7 @@ dependencies = [ "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", "wabt 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)", "wasmer-clif-backend 0.2.0", + "wasmer-dynasm-backend 0.1.0", "wasmer-llvm-backend 0.1.0", "wasmer-runtime-core 0.2.1", ] diff --git a/lib/emscripten/Cargo.toml b/lib/emscripten/Cargo.toml index cd691e56652..cfffe82f825 100644 --- a/lib/emscripten/Cargo.toml +++ b/lib/emscripten/Cargo.toml @@ -21,6 +21,7 @@ rand = "0.6" [dev-dependencies] wasmer-clif-backend = { path = "../clif-backend", version = "0.2.0" } wabt = "0.7.2" +wasmer-dynasm-backend = { path = "../dynasm-backend", version = "0.1.0" } [target.'cfg(not(windows))'.dev-dependencies] wasmer-llvm-backend = { path = "../llvm-backend", version = "0.1.0" } @@ -30,4 +31,5 @@ glob = "0.2.11" [features] clif = [] -llvm = [] \ No newline at end of file +llvm = [] +dynasm = [] \ No newline at end of file diff --git a/lib/emscripten/src/utils.rs b/lib/emscripten/src/utils.rs index 79936f2f40f..dfb7f6f6437 100644 --- a/lib/emscripten/src/utils.rs +++ b/lib/emscripten/src/utils.rs @@ -185,7 +185,13 @@ mod tests { LLVMCompiler::new() } - #[cfg(not(any(feature = "llvm", feature = "clif")))] + #[cfg(feature = "dynasm")] + fn get_compiler() -> impl Compiler { + use wasmer_dynasm_backend::SinglePassCompiler; + SinglePassCompiler::new() + } + + #[cfg(not(any(feature = "llvm", feature = "clif", feature = "dynasm")))] fn get_compiler() -> impl Compiler { panic!("compiler not specified, activate a compiler via features"); use wasmer_clif_backend::CraneliftCompiler; diff --git a/lib/emscripten/tests/emtests/_common.rs b/lib/emscripten/tests/emtests/_common.rs index e7a42b356b9..1289fe741d2 100644 --- a/lib/emscripten/tests/emtests/_common.rs +++ b/lib/emscripten/tests/emtests/_common.rs @@ -20,7 +20,13 @@ macro_rules! 
assert_emscripten_output { LLVMCompiler::new() } - #[cfg(not(any(feature = "llvm", feature = "clif")))] + #[cfg(feature = "dynasm")] + fn get_compiler() -> impl Compiler { + use wasmer_dynasm_backend::SinglePassCompiler; + SinglePassCompiler::new() + } + + #[cfg(not(any(feature = "llvm", feature = "clif", feature = "dynasm")))] fn get_compiler() -> impl Compiler { panic!("compiler not specified, activate a compiler via features"); use wasmer_clif_backend::CraneliftCompiler; From d80ea47ffcb012f6011c5f707c2042b492e87a21 Mon Sep 17 00:00:00 2001 From: losfair Date: Thu, 14 Mar 2019 10:30:24 +0800 Subject: [PATCH 062/100] FIx bugs found with spectests. --- lib/dynasm-backend/src/codegen_x64.rs | 246 ++++++++++++++++++++++++-- lib/runtime-core/src/vm.rs | 2 +- 2 files changed, 237 insertions(+), 11 deletions(-) diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index 21720ead429..69e754acbf3 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -17,8 +17,10 @@ use wasmer_runtime_core::{ structures::{Map, TypedIndex}, types::{ FuncIndex, FuncSig, GlobalIndex, LocalFuncIndex, LocalGlobalIndex, MemoryIndex, SigIndex, - TableIndex, Type, Value, + TableIndex, Type, Value, LocalMemoryIndex, ImportedMemoryIndex, LocalOrImport, }, + memory::MemoryType, + units::Pages, vm::{self, ImportBacking, LocalGlobal, LocalTable}, }; use wasmparser::{Operator, Type as WpType}; @@ -207,6 +209,18 @@ pub enum TrapCode { pub struct NativeTrampolines { trap_unreachable: DynamicLabel, + memory_size_dynamic_local: DynamicLabel, + memory_size_static_local: DynamicLabel, + memory_size_shared_local: DynamicLabel, + memory_size_dynamic_import: DynamicLabel, + memory_size_static_import: DynamicLabel, + memory_size_shared_import: DynamicLabel, + memory_grow_dynamic_local: DynamicLabel, + memory_grow_static_local: DynamicLabel, + memory_grow_shared_local: DynamicLabel, + memory_grow_dynamic_import: DynamicLabel, + memory_grow_static_import: DynamicLabel, + memory_grow_shared_import: DynamicLabel, } pub struct X64ModuleCodeGenerator { @@ -325,12 +339,19 @@ impl ProtectedCaller for X64ExecutionContext { } let memory_base: *mut u8 = if _module.info.memories.len() > 0 { - if _module.info.memories.len() != 1 { + if _module.info.memories.len() != 1 || _module.info.imported_memories.len() != 0 { return Err(RuntimeError::Trap { msg: "only one linear memory is supported".into(), }); } unsafe { (**(*_vmctx).memories).base } + } else if _module.info.imported_memories.len() > 0 { + if _module.info.memories.len() != 0 || _module.info.imported_memories.len() != 1 { + return Err(RuntimeError::Trap { + msg: "only one linear memory is supported".into(), + }); + } + unsafe { (**(*_vmctx).imported_memories).base } } else { ::std::ptr::null_mut() }; @@ -392,6 +413,78 @@ impl X64ModuleCodeGenerator { 0usize, TrapCode::Unreachable, ), + memory_size_dynamic_local: X64FunctionCode::emit_native_call_trampoline( + &mut assembler, + _memory_size, + MemoryKind::DynamicLocal, + 0usize, + ), + memory_size_static_local: X64FunctionCode::emit_native_call_trampoline( + &mut assembler, + _memory_size, + MemoryKind::StaticLocal, + 0usize, + ), + memory_size_shared_local: X64FunctionCode::emit_native_call_trampoline( + &mut assembler, + _memory_size, + MemoryKind::SharedLocal, + 0usize, + ), + memory_size_dynamic_import: X64FunctionCode::emit_native_call_trampoline( + &mut assembler, + _memory_size, + MemoryKind::DynamicImport, + 0usize, + ), + memory_size_static_import: 
X64FunctionCode::emit_native_call_trampoline( + &mut assembler, + _memory_size, + MemoryKind::StaticImport, + 0usize, + ), + memory_size_shared_import: X64FunctionCode::emit_native_call_trampoline( + &mut assembler, + _memory_size, + MemoryKind::SharedImport, + 0usize, + ), + memory_grow_dynamic_local: X64FunctionCode::emit_native_call_trampoline( + &mut assembler, + _memory_grow, + MemoryKind::DynamicLocal, + 0usize, + ), + memory_grow_static_local: X64FunctionCode::emit_native_call_trampoline( + &mut assembler, + _memory_grow, + MemoryKind::StaticLocal, + 0usize, + ), + memory_grow_shared_local: X64FunctionCode::emit_native_call_trampoline( + &mut assembler, + _memory_grow, + MemoryKind::SharedLocal, + 0usize, + ), + memory_grow_dynamic_import: X64FunctionCode::emit_native_call_trampoline( + &mut assembler, + _memory_grow, + MemoryKind::DynamicImport, + 0usize, + ), + memory_grow_static_import: X64FunctionCode::emit_native_call_trampoline( + &mut assembler, + _memory_grow, + MemoryKind::StaticImport, + 0usize, + ), + memory_grow_shared_import: X64FunctionCode::emit_native_call_trampoline( + &mut assembler, + _memory_grow, + MemoryKind::SharedImport, + 0usize, + ), }; X64ModuleCodeGenerator { @@ -1294,11 +1387,11 @@ impl X64FunctionCode { let mut offset: usize = 0; let mut caller_stack_offset: usize = 0; - for ty in params { + for ty in params.iter().rev() { let val = value_stack.pop()?; if val.ty != *ty { return Err(CodegenError { - message: "value type mismatch", + message: "value type mismatch in call", }); } @@ -1429,7 +1522,7 @@ impl X64FunctionCode { if value_info.ty != value_ty { return Err(CodegenError { - message: "value type mismatch", + message: "value type mismatch in memory store", }); } @@ -2725,11 +2818,25 @@ impl FunctionCodeGenerator for X64FunctionCode { message: "only one table is supported", }); } - if module_info.tables.len() != 1 { + let local_or_import = if module_info.tables.len() > 0 { + if module_info.tables.len() != 1 || module_info.imported_tables.len() != 0 { + return Err(CodegenError { + message: "only one table is supported", + }); + } + CallIndirectLocalOrImport::Local + } else if module_info.imported_tables.len() > 0 { + if module_info.tables.len() != 0 || module_info.imported_tables.len() != 1 { + return Err(CodegenError { + message: "only one table is supported", + }); + } + CallIndirectLocalOrImport::Import + } else { return Err(CodegenError { message: "no tables", }); - } + }; let sig_index = SigIndex::new(index as usize); let sig = match self.signatures.get(sig_index) { Some(x) => x, @@ -2753,8 +2860,8 @@ impl FunctionCodeGenerator for X64FunctionCode { let trampoline_label = Self::emit_native_call_trampoline( assembler, call_indirect, - 0usize, index as usize, + local_or_import, ); dynasm!( @@ -3225,6 +3332,62 @@ impl FunctionCodeGenerator for X64FunctionCode { )?; } Operator::Nop => {} + Operator::MemorySize { reserved } => { + let memory_index = MemoryIndex::new(reserved as usize); + let label = match memory_index.local_or_import(module_info) { + LocalOrImport::Local(local_mem_index) => { + let mem_desc = &module_info.memories[local_mem_index]; + match mem_desc.memory_type() { + MemoryType::Dynamic => self.native_trampolines.memory_size_dynamic_local, + MemoryType::Static => self.native_trampolines.memory_size_static_local, + MemoryType::SharedStatic => self.native_trampolines.memory_size_shared_local, + } + } + LocalOrImport::Import(import_mem_index) => { + let mem_desc = &module_info.imported_memories[import_mem_index].1; + match 
mem_desc.memory_type() { + MemoryType::Dynamic => self.native_trampolines.memory_size_dynamic_import, + MemoryType::Static => self.native_trampolines.memory_size_static_import, + MemoryType::SharedStatic => self.native_trampolines.memory_size_shared_import, + } + } + }; + Self::emit_call_raw( + assembler, + &mut self.value_stack, + label, + &[], + &[WpType::I32] + )?; + } + Operator::MemoryGrow { reserved } => { + let memory_index = MemoryIndex::new(reserved as usize); + let label = match memory_index.local_or_import(module_info) { + LocalOrImport::Local(local_mem_index) => { + let mem_desc = &module_info.memories[local_mem_index]; + match mem_desc.memory_type() { + MemoryType::Dynamic => self.native_trampolines.memory_grow_dynamic_local, + MemoryType::Static => self.native_trampolines.memory_grow_static_local, + MemoryType::SharedStatic => self.native_trampolines.memory_grow_shared_local, + } + } + LocalOrImport::Import(import_mem_index) => { + let mem_desc = &module_info.imported_memories[import_mem_index].1; + match mem_desc.memory_type() { + MemoryType::Dynamic => self.native_trampolines.memory_grow_dynamic_import, + MemoryType::Static => self.native_trampolines.memory_grow_static_import, + MemoryType::SharedStatic => self.native_trampolines.memory_grow_shared_import, + } + } + }; + Self::emit_call_raw( + assembler, + &mut self.value_stack, + label, + &[WpType::I32], + &[WpType::I32] + )?; + } _ => { panic!("{:?}", op); }, @@ -3313,9 +3476,16 @@ unsafe extern "C" fn invoke_import( CONSTRUCT_STACK_AND_CALL_NATIVE(stack_top, stack_base, vmctx, import) } +#[repr(u64)] +#[derive(Copy, Clone, Debug)] +enum CallIndirectLocalOrImport { + Local, + Import +} + unsafe extern "C" fn call_indirect( - _unused: usize, sig_index: usize, + local_or_import: CallIndirectLocalOrImport, mut stack_top: *mut u8, stack_base: *mut u8, vmctx: *mut vm::Ctx, @@ -3325,7 +3495,10 @@ unsafe extern "C" fn call_indirect( stack_top = stack_top.offset(8); assert!(stack_top as usize <= stack_base as usize); - let table: &LocalTable = &*(*(*vmctx).tables); + let table: &LocalTable = match local_or_import { + CallIndirectLocalOrImport::Local => &*(*(*vmctx).tables), + CallIndirectLocalOrImport::Import => &*(*(*vmctx).imported_tables), + } ; if elem_index >= table.count as usize { panic!("element index out of bounds"); } @@ -3355,3 +3528,56 @@ unsafe extern "C" fn call_indirect( vmctx, ) as u64 } + +#[repr(u64)] +#[derive(Copy, Clone, Debug)] +enum MemoryKind { + DynamicLocal, + StaticLocal, + SharedLocal, + DynamicImport, + StaticImport, + SharedImport, +} + +unsafe extern "C" fn _memory_size( + op: MemoryKind, + index: usize, + mut stack_top: *mut u8, + stack_base: *mut u8, + vmctx: *mut vm::Ctx, + memory_base: *mut u8, +) -> u64 { + use wasmer_runtime_core::vmcalls; + let ret = match op { + MemoryKind::DynamicLocal => vmcalls::local_dynamic_memory_size(&*vmctx, LocalMemoryIndex::new(index)), + MemoryKind::StaticLocal => vmcalls::local_static_memory_size(&*vmctx, LocalMemoryIndex::new(index)), + MemoryKind::SharedLocal => unreachable!(), + MemoryKind::DynamicImport => vmcalls::imported_dynamic_memory_size(&*vmctx, ImportedMemoryIndex::new(index)), + MemoryKind::StaticImport => vmcalls::imported_static_memory_size(&*vmctx, ImportedMemoryIndex::new(index)), + MemoryKind::SharedImport => unreachable!(), + }; + ret.0 as u32 as u64 +} + +unsafe extern "C" fn _memory_grow( + op: MemoryKind, + index: usize, + mut stack_top: *mut u8, + stack_base: *mut u8, + vmctx: *mut vm::Ctx, + memory_base: *mut u8, +) -> u64 { + use 
wasmer_runtime_core::vmcalls; + assert_eq!(stack_base as usize - stack_top as usize, 8); + let pages = Pages(*(stack_top as *mut u32)); + let ret = match op { + MemoryKind::DynamicLocal => vmcalls::local_dynamic_memory_grow(&mut *vmctx, LocalMemoryIndex::new(index), pages), + MemoryKind::StaticLocal => vmcalls::local_static_memory_grow(&mut *vmctx, LocalMemoryIndex::new(index), pages), + MemoryKind::SharedLocal => unreachable!(), + MemoryKind::DynamicImport => vmcalls::imported_dynamic_memory_grow(&mut *vmctx, ImportedMemoryIndex::new(index), pages), + MemoryKind::StaticImport => vmcalls::imported_static_memory_grow(&mut *vmctx, ImportedMemoryIndex::new(index), pages), + MemoryKind::SharedImport => unreachable!(), + }; + ret as u32 as u64 +} diff --git a/lib/runtime-core/src/vm.rs b/lib/runtime-core/src/vm.rs index d26b52345d4..b668833cd55 100644 --- a/lib/runtime-core/src/vm.rs +++ b/lib/runtime-core/src/vm.rs @@ -23,7 +23,7 @@ pub struct Ctx { pub globals: *mut *mut LocalGlobal, /// A pointer to an array of imported memories, indexed by `MemoryIndex, - pub(crate) imported_memories: *mut *mut LocalMemory, + pub imported_memories: *mut *mut LocalMemory, /// A pointer to an array of imported tables, indexed by `TableIndex`. pub imported_tables: *mut *mut LocalTable, From 592c3fb9385e9d53bdb6a7e36f37e2f176a22cfa Mon Sep 17 00:00:00 2001 From: losfair Date: Thu, 14 Mar 2019 17:11:35 +0800 Subject: [PATCH 063/100] Fixes and some floating point operations. --- lib/dynasm-backend/src/codegen_x64.rs | 404 +++++++++++++++++++++++++- 1 file changed, 401 insertions(+), 3 deletions(-) diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index 69e754acbf3..c5b5dcd4d06 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -676,6 +676,17 @@ impl X64FunctionCode { Ok(()) } + fn emit_reinterpret(value_stack: &mut ValueStack, in_ty: WpType, out_ty: WpType) -> Result<(), CodegenError> { + let val = value_stack.pop()?; + if val.ty != in_ty { + return Err(CodegenError { + message: "reinterpret type mismatch" + }); + } + value_stack.push(out_ty); + Ok(()) + } + /// Emits a unary operator. fn emit_unop( assembler: &mut Assembler, @@ -1117,7 +1128,7 @@ impl X64FunctionCode { Some(x) => x, None => { return Err(CodegenError { - message: "no frame", + message: "no frame (else)", }) } }; @@ -1163,7 +1174,7 @@ impl X64FunctionCode { Some(x) => x, None => { return Err(CodegenError { - message: "no frame", + message: "no frame (block end)", }) } }; @@ -1651,9 +1662,17 @@ impl FunctionCodeGenerator for X64FunctionCode { Operator::Block { .. } | Operator::Loop { .. } | Operator::If { .. 
} => { self.unreachable_depth += 1; } - Operator::End | Operator::Else => { + Operator::End => { self.unreachable_depth -= 1; } + Operator::Else => { + // We are in a reachable true branch + if self.unreachable_depth == 1 { + if let Some(IfElseState::If(_)) = self.control_stack.as_ref().unwrap().frames.last().map(|x| x.if_else) { + self.unreachable_depth -= 1; + } + } + } _ => {} } if self.unreachable_depth > 0 { @@ -1981,6 +2000,18 @@ impl FunctionCodeGenerator for X64FunctionCode { }, )?; } + Operator::I32Xor => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + dynasm!( + assembler + ; xor Rd(left as u8), Rd(right as u8) + ); + }, + )?; + } Operator::I32Eq => { Self::emit_binop_i32( assembler, @@ -2399,6 +2430,18 @@ impl FunctionCodeGenerator for X64FunctionCode { }, )?; } + Operator::I64Xor => { + Self::emit_binop_i64( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + dynasm!( + assembler + ; xor Rq(left as u8), Rq(right as u8) + ); + }, + )?; + } Operator::I64Eq => { Self::emit_binop( assembler, @@ -3331,6 +3374,361 @@ impl FunctionCodeGenerator for X64FunctionCode { WpType::I64, )?; } + Operator::F32Const { value } => { + let location = self.value_stack.push(WpType::F32); + match location { + ValueLocation::Register(x) => { + let reg = Register::from_scratch_reg(x); + dynasm!( + assembler + ; mov Rd(reg as u8), value.bits() as i32 + ); + } + ValueLocation::Stack => { + dynasm!( + assembler + ; push value.bits() as i32 + ); + } + } + } + Operator::F64Const { value } => { + let location = self.value_stack.push(WpType::F64); + match location { + ValueLocation::Register(x) => { + let reg = Register::from_scratch_reg(x); + dynasm!( + assembler + ; mov Rq(reg as u8), QWORD value.bits() as i64 + ); + } + ValueLocation::Stack => { + dynasm!( + assembler + ; mov rax, QWORD value.bits() as i64 + ; push rax + ); + } + } + } + Operator::F32Load { memarg } => { + Self::emit_memory_load( + assembler, + &mut self.value_stack, + |assembler, reg| { + dynasm!( + assembler + ; mov Rd(reg as u8), [Rq(reg as u8) + memarg.offset as i32] + ); + }, + WpType::F32, + )?; + } + Operator::F32Store { memarg } => { + Self::emit_memory_store( + assembler, + &mut self.value_stack, + |assembler, addr_reg, value_reg| { + dynasm!( + assembler + ; mov [Rq(addr_reg as u8) + memarg.offset as i32], Rd(value_reg as u8) + ); + }, + WpType::F32, + )?; + } + Operator::F64Load { memarg } => { + Self::emit_memory_load( + assembler, + &mut self.value_stack, + |assembler, reg| { + dynasm!( + assembler + ; mov Rq(reg as u8), [Rq(reg as u8) + memarg.offset as i32] + ); + }, + WpType::F64, + )?; + } + Operator::F64Store { memarg } => { + Self::emit_memory_store( + assembler, + &mut self.value_stack, + |assembler, addr_reg, value_reg| { + dynasm!( + assembler + ; mov [Rq(addr_reg as u8) + memarg.offset as i32], Rq(value_reg as u8) + ); + }, + WpType::F64, + )?; + } + Operator::I32ReinterpretF32 => { + Self::emit_reinterpret(&mut self.value_stack, WpType::F32, WpType::I32)?; + } + Operator::F32ReinterpretI32 => { + Self::emit_reinterpret(&mut self.value_stack, WpType::I32, WpType::F32)?; + } + Operator::I64ReinterpretF64 => { + Self::emit_reinterpret(&mut self.value_stack, WpType::F64, WpType::I64)?; + } + Operator::F64ReinterpretI64 => { + Self::emit_reinterpret(&mut self.value_stack, WpType::I64, WpType::F64)?; + } + Operator::F32Add => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + 
dynasm!( + assembler + ; movd xmm1, Rd(left as u8) + ; movd xmm2, Rd(right as u8) + ; addss xmm1, xmm2 + ; movd Rd(left as u8), xmm1 + ); + }, + WpType::F32, + WpType::F32, + )?; + } + Operator::F32Sub => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + dynasm!( + assembler + ; movd xmm1, Rd(left as u8) + ; movd xmm2, Rd(right as u8) + ; subss xmm1, xmm2 + ; movd Rd(left as u8), xmm1 + ); + }, + WpType::F32, + WpType::F32, + )?; + } + Operator::F32Mul => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + dynasm!( + assembler + ; movd xmm1, Rd(left as u8) + ; movd xmm2, Rd(right as u8) + ; mulss xmm1, xmm2 + ; movd Rd(left as u8), xmm1 + ); + }, + WpType::F32, + WpType::F32, + )?; + } + Operator::F32Div => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + dynasm!( + assembler + ; movd xmm1, Rd(left as u8) + ; movd xmm2, Rd(right as u8) + ; divss xmm1, xmm2 + ; movd Rd(left as u8), xmm1 + ); + }, + WpType::F32, + WpType::F32, + )?; + } + Operator::F32Max => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + dynasm!( + assembler + ; movd xmm1, Rd(left as u8) + ; movd xmm2, Rd(right as u8) + ; maxss xmm1, xmm2 + ; movd Rd(left as u8), xmm1 + ); + }, + WpType::F32, + WpType::F32, + )?; + } + Operator::F32Min => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + dynasm!( + assembler + ; movd xmm1, Rd(left as u8) + ; movd xmm2, Rd(right as u8) + ; minss xmm1, xmm2 + ; movd Rd(left as u8), xmm1 + ); + }, + WpType::F32, + WpType::F32, + )?; + } + Operator::F32Eq => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + dynasm!( + assembler + ; movd xmm1, Rd(left as u8) + ; movd xmm2, Rd(right as u8) + ; cmpeqss xmm1, xmm2 + ; movd Rd(left as u8), xmm1 + ; and Rd(left as u8), 1 + ); + }, + WpType::F32, + WpType::I32, + )?; + } + Operator::F32Ne => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + dynasm!( + assembler + ; movd xmm1, Rd(left as u8) + ; movd xmm2, Rd(right as u8) + ; cmpneqss xmm1, xmm2 + ; movd Rd(left as u8), xmm1 + ; and Rd(left as u8), 1 + ); + }, + WpType::F32, + WpType::I32, + )?; + } + Operator::F32Gt => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + dynasm!( + assembler + ; movd xmm1, Rd(left as u8) + ; movd xmm2, Rd(right as u8) + ; vcmpgtss xmm1, xmm1, xmm2 + ; movd Rd(left as u8), xmm1 + ; and Rd(left as u8), 1 + ); + }, + WpType::F32, + WpType::I32, + )?; + } + Operator::F32Ge => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + dynasm!( + assembler + ; movd xmm1, Rd(left as u8) + ; movd xmm2, Rd(right as u8) + ; vcmpgess xmm1, xmm1, xmm2 + ; movd Rd(left as u8), xmm1 + ; and Rd(left as u8), 1 + ); + }, + WpType::F32, + WpType::I32, + )?; + } + Operator::F32Lt => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + dynasm!( + assembler + ; movd xmm1, Rd(left as u8) + ; movd xmm2, Rd(right as u8) + ; cmpltss xmm1, xmm2 + ; movd Rd(left as u8), xmm1 + ; and Rd(left as u8), 1 + ); + }, + WpType::F32, + WpType::I32, + )?; + } + Operator::F32Le => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + 
dynasm!( + assembler + ; movd xmm1, Rd(left as u8) + ; movd xmm2, Rd(right as u8) + ; cmpless xmm1, xmm2 + ; movd Rd(left as u8), xmm1 + ; and Rd(left as u8), 1 + ); + }, + WpType::F32, + WpType::I32, + )?; + } + Operator::F32Sqrt => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, value_stack, reg| { + dynasm!( + assembler + ; movd xmm1, Rd(reg as u8) + ; sqrtss xmm1, xmm1 + ; movd Rd(reg as u8), xmm1 + ); + }, + WpType::F32, + WpType::F32 + )?; + } + Operator::F32Abs => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, value_stack, reg| { + dynasm!( + assembler + ; and Rd(reg as u8), 0x7fffffffu32 as i32 + ); + }, + WpType::F32, + WpType::F32 + )?; + } + Operator::F32Neg => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, value_stack, reg| { + dynasm!( + assembler + ; btc Rd(reg as u8), 31 + ); + }, + WpType::F32, + WpType::F32 + )?; + } Operator::Nop => {} Operator::MemorySize { reserved } => { let memory_index = MemoryIndex::new(reserved as usize); From 4d2b6a0efedd9b6a71163bbdd766a55dc462522e Mon Sep 17 00:00:00 2001 From: losfair Date: Thu, 14 Mar 2019 17:11:48 +0800 Subject: [PATCH 064/100] Add Copy + Clone for IfElseState --- lib/dynasm-backend/src/stack.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/dynasm-backend/src/stack.rs b/lib/dynasm-backend/src/stack.rs index e78eaa0ed31..d1ffc29f859 100644 --- a/lib/dynasm-backend/src/stack.rs +++ b/lib/dynasm-backend/src/stack.rs @@ -21,7 +21,7 @@ pub enum RegisterName { Invalid, }*/ -#[derive(Debug)] +#[derive(Debug, Copy, Clone)] pub enum IfElseState { None, If(DynamicLabel), From 1104073641ad2052032d3cbd46cda5ed0a417d68 Mon Sep 17 00:00:00 2001 From: losfair Date: Thu, 14 Mar 2019 19:10:22 +0800 Subject: [PATCH 065/100] Implemented more floating point operations. 
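
The unsigned 64-bit to float conversions above work around x86 only having signed cvtsi2ss/cvtsi2sd: inputs with the sign bit set are halved while keeping the low bit (so rounding is preserved), converted as signed, then doubled, following the disassembly quoted just before the F32ConvertUI64 arm. A Rust reference for the f64 case (sketch only):

    // What the emitted test/js + shr/or + cvtsi2sd + add sequence computes.
    fn u64_to_f64(x: u64) -> f64 {
        if (x as i64) >= 0 {
            x as i64 as f64                   // fast path: value fits the signed range
        } else {
            let halved = (x >> 1) | (x & 1);  // keep the dropped bit for correct rounding
            (halved as i64 as f64) * 2.0      // convert as signed, then double
        }
    }
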
--- lib/dynasm-backend/src/codegen_x64.rs | 504 ++++++++++++++++++++++++++ 1 file changed, 504 insertions(+) diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index c5b5dcd4d06..2920df70054 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -3475,6 +3475,206 @@ impl FunctionCodeGenerator for X64FunctionCode { Operator::F64ReinterpretI64 => { Self::emit_reinterpret(&mut self.value_stack, WpType::I64, WpType::F64)?; } + Operator::F32ConvertSI32 => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, value_stack, reg| { + dynasm!( + assembler + ; cvtsi2ss xmm1, Rd(reg as u8) + ; movd Rd(reg as u8), xmm1 + ); + }, + WpType::I32, + WpType::F32, + )?; + } + Operator::F32ConvertUI32 => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, value_stack, reg| { + dynasm!( + assembler + ; mov Rd(reg as u8), Rd(reg as u8) // clear upper 32 bits + ; cvtsi2ss xmm1, Rq(reg as u8) + ; movd Rd(reg as u8), xmm1 + ); + }, + WpType::I32, + WpType::F32, + )?; + } + Operator::F32ConvertSI64 => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, value_stack, reg| { + dynasm!( + assembler + ; cvtsi2ss xmm1, Rq(reg as u8) + ; movd Rd(reg as u8), xmm1 + ); + }, + WpType::I64, + WpType::F32, + )?; + } + /* + 0: 48 85 ff test %rdi,%rdi + 3: 78 0b js 10 + 5: c4 e1 fb 2a c7 vcvtsi2sd %rdi,%xmm0,%xmm0 + a: c3 retq + b: 0f 1f 44 00 00 nopl 0x0(%rax,%rax,1) + 10: 48 89 f8 mov %rdi,%rax + 13: 83 e7 01 and $0x1,%edi + 16: 48 d1 e8 shr %rax + 19: 48 09 f8 or %rdi,%rax + 1c: c4 e1 fb 2a c0 vcvtsi2sd %rax,%xmm0,%xmm0 + 21: c5 fb 58 c0 vaddsd %xmm0,%xmm0,%xmm0 + 25: c3 retq + */ + Operator::F32ConvertUI64 => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, value_stack, reg| { + dynasm!( + assembler + ; test Rq(reg as u8), Rq(reg as u8) + ; js >do_convert + // fast path: positive as signed + ; cvtsi2ss xmm1, Rq(reg as u8) + ; movd Rd(reg as u8), xmm1 + ; jmp >end_convert + ; do_convert: + // use r15 as temporary register + ; movq xmm5, r15 + ; mov r15, Rq(reg as u8) + ; and r15, 1 + ; shr Rq(reg as u8), 1 + ; or Rq(reg as u8), r15 + ; cvtsi2ss xmm1, Rq(reg as u8) + ; addsd xmm1, xmm1 + ; movq r15, xmm5 + ; movd Rd(reg as u8), xmm1 + ; end_convert: + ); + }, + WpType::I64, + WpType::F32, + )?; + } + Operator::F64ConvertSI32 => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, value_stack, reg| { + dynasm!( + assembler + ; cvtsi2sd xmm1, Rd(reg as u8) + ; movq Rq(reg as u8), xmm1 + ); + }, + WpType::I32, + WpType::F64, + )?; + } + Operator::F64ConvertUI32 => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, value_stack, reg| { + dynasm!( + assembler + ; mov Rd(reg as u8), Rd(reg as u8) // clear upper 32 bits + ; cvtsi2sd xmm1, Rq(reg as u8) + ; movq Rq(reg as u8), xmm1 + ); + }, + WpType::I32, + WpType::F64, + )?; + } + Operator::F64ConvertSI64 => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, value_stack, reg| { + dynasm!( + assembler + ; cvtsi2sd xmm1, Rq(reg as u8) + ; movq Rq(reg as u8), xmm1 + ); + }, + WpType::I64, + WpType::F64, + )?; + } + Operator::F64ConvertUI64 => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, value_stack, reg| { + dynasm!( + assembler + ; test Rq(reg as u8), Rq(reg as u8) + ; js >do_convert + // fast path: positive as signed + ; cvtsi2sd xmm1, Rq(reg as u8) + ; movq Rq(reg as u8), xmm1 + ; jmp >end_convert + ; do_convert: + // use r15 as 
temporary register + ; movq xmm5, r15 + ; mov r15, Rq(reg as u8) + ; and r15, 1 + ; shr Rq(reg as u8), 1 + ; or Rq(reg as u8), r15 + ; cvtsi2sd xmm1, Rq(reg as u8) + ; addsd xmm1, xmm1 + ; movq r15, xmm5 + ; movq Rq(reg as u8), xmm1 + ; end_convert: + ); + }, + WpType::I64, + WpType::F64, + )?; + } + Operator::F64PromoteF32 => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, value_stack, reg| { + dynasm!( + assembler + ; movd xmm1, Rd(reg as u8) + ; cvtss2sd xmm1, xmm1 + ; movq Rq(reg as u8), xmm1 + ); + }, + WpType::F32, + WpType::F64, + )?; + } + Operator::F32DemoteF64 => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, value_stack, reg| { + dynasm!( + assembler + ; movq xmm1, Rq(reg as u8) + ; cvtsd2ss xmm1, xmm1 + ; movd Rd(reg as u8), xmm1 + ); + }, + WpType::F64, + WpType::F32, + )?; + } Operator::F32Add => { Self::emit_binop( assembler, @@ -3685,6 +3885,29 @@ impl FunctionCodeGenerator for X64FunctionCode { WpType::I32, )?; } + Operator::F32Copysign => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + dynasm!( + assembler + ; movd xmm1, Rd(left as u8) + ; movd xmm2, Rd(right as u8) + ; mov eax, 0x7fffffffu32 as i32 + ; movd xmm3, eax + ; pand xmm1, xmm3 + ; mov eax, 0x80000000u32 as i32 + ; movd xmm3, eax + ; pand xmm2, xmm3 + ; por xmm1, xmm2 + ; movd Rd(left as u8), xmm1 + ); + }, + WpType::F32, + WpType::F32, + )?; + } Operator::F32Sqrt => { Self::emit_unop( assembler, @@ -3729,6 +3952,287 @@ impl FunctionCodeGenerator for X64FunctionCode { WpType::F32 )?; } + Operator::F64Add => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + dynasm!( + assembler + ; movq xmm1, Rq(left as u8) + ; movq xmm2, Rq(right as u8) + ; addsd xmm1, xmm2 + ; movq Rq(left as u8), xmm1 + ); + }, + WpType::F64, + WpType::F64, + )?; + } + Operator::F64Sub => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + dynasm!( + assembler + ; movq xmm1, Rq(left as u8) + ; movq xmm2, Rq(right as u8) + ; subsd xmm1, xmm2 + ; movq Rq(left as u8), xmm1 + ); + }, + WpType::F64, + WpType::F64, + )?; + } + Operator::F64Mul => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + dynasm!( + assembler + ; movq xmm1, Rq(left as u8) + ; movq xmm2, Rq(right as u8) + ; mulsd xmm1, xmm2 + ; movq Rq(left as u8), xmm1 + ); + }, + WpType::F64, + WpType::F64, + )?; + } + Operator::F64Div => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + dynasm!( + assembler + ; movq xmm1, Rq(left as u8) + ; movq xmm2, Rq(right as u8) + ; divsd xmm1, xmm2 + ; movq Rq(left as u8), xmm1 + ); + }, + WpType::F64, + WpType::F64, + )?; + } + Operator::F64Max => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + dynasm!( + assembler + ; movq xmm1, Rq(left as u8) + ; movq xmm2, Rq(right as u8) + ; maxsd xmm1, xmm2 + ; movq Rq(left as u8), xmm1 + ); + }, + WpType::F64, + WpType::F64, + )?; + } + Operator::F64Min => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + dynasm!( + assembler + ; movq xmm1, Rq(left as u8) + ; movq xmm2, Rq(right as u8) + ; minsd xmm1, xmm2 + ; movq Rq(left as u8), xmm1 + ); + }, + WpType::F64, + WpType::F64, + )?; + } + Operator::F64Eq => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, 
value_stack, left, right| { + dynasm!( + assembler + ; movq xmm1, Rq(left as u8) + ; movq xmm2, Rq(right as u8) + ; cmpeqsd xmm1, xmm2 + ; movd Rd(left as u8), xmm1 + ; and Rd(left as u8), 1 + ); + }, + WpType::F64, + WpType::I32, + )?; + } + Operator::F64Ne => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + dynasm!( + assembler + ; movq xmm1, Rq(left as u8) + ; movq xmm2, Rq(right as u8) + ; cmpneqsd xmm1, xmm2 + ; movd Rd(left as u8), xmm1 + ; and Rd(left as u8), 1 + ); + }, + WpType::F64, + WpType::I32, + )?; + } + Operator::F64Gt => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + dynasm!( + assembler + ; movq xmm1, Rq(left as u8) + ; movq xmm2, Rq(right as u8) + ; vcmpgtsd xmm1, xmm1, xmm2 + ; movd Rd(left as u8), xmm1 + ; and Rd(left as u8), 1 + ); + }, + WpType::F64, + WpType::I32, + )?; + } + Operator::F64Ge => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + dynasm!( + assembler + ; movq xmm1, Rq(left as u8) + ; movq xmm2, Rq(right as u8) + ; vcmpgesd xmm1, xmm1, xmm2 + ; movd Rd(left as u8), xmm1 + ; and Rd(left as u8), 1 + ); + }, + WpType::F64, + WpType::I32, + )?; + } + Operator::F64Lt => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + dynasm!( + assembler + ; movq xmm1, Rq(left as u8) + ; movq xmm2, Rq(right as u8) + ; cmpltsd xmm1, xmm2 + ; movd Rd(left as u8), xmm1 + ; and Rd(left as u8), 1 + ); + }, + WpType::F64, + WpType::I32, + )?; + } + Operator::F64Le => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + dynasm!( + assembler + ; movq xmm1, Rq(left as u8) + ; movq xmm2, Rq(right as u8) + ; cmplesd xmm1, xmm2 + ; movd Rd(left as u8), xmm1 + ; and Rd(left as u8), 1 + ); + }, + WpType::F64, + WpType::I32, + )?; + } + Operator::F64Copysign => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + dynasm!( + assembler + ; movq xmm1, Rq(left as u8) + ; movq xmm2, Rq(right as u8) + ; mov rax, QWORD 0x7fffffffffffffffu64 as i64 + ; movq xmm3, rax + ; pand xmm1, xmm3 + ; mov rax, QWORD 0x8000000000000000u64 as i64 + ; movq xmm3, rax + ; pand xmm2, xmm3 + ; por xmm1, xmm2 + ; movq Rq(left as u8), xmm1 + ); + }, + WpType::F64, + WpType::F64, + )?; + } + Operator::F64Sqrt => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, value_stack, reg| { + dynasm!( + assembler + ; movq xmm1, Rq(reg as u8) + ; sqrtsd xmm1, xmm1 + ; movq Rq(reg as u8), xmm1 + ); + }, + WpType::F64, + WpType::F64 + )?; + } + Operator::F64Abs => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, value_stack, reg| { + dynasm!( + assembler + ; movq xmm1, Rq(reg as u8) + ; mov rax, QWORD 0x7fffffffffffffff + ; movq xmm2, rax + ; por xmm1, xmm2 + ; movq Rq(reg as u8), xmm1 + ); + }, + WpType::F64, + WpType::F64 + )?; + } + Operator::F64Neg => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, value_stack, reg| { + dynasm!( + assembler + ; btc Rq(reg as u8), 63 + ); + }, + WpType::F64, + WpType::F64 + )?; + } Operator::Nop => {} Operator::MemorySize { reserved } => { let memory_index = MemoryIndex::new(reserved as usize); From 1b5ea9bece2876a37e19b8b64d8ce5aebf9cbba2 Mon Sep 17 00:00:00 2001 From: losfair Date: Fri, 15 Mar 2019 01:10:31 +0800 Subject: [PATCH 066/100] Passes all `assert_return(_*)` tests. 
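
Nearest/Floor/Ceil/Trunc (and the float-to-int truncations) all go through roundss/roundsd, selecting the rounding mode with the immediate. For reference, the immediates used in this patch map as follows (sketch of the intended semantics; wasm's trapping on NaN and out-of-range i32/i64.trunc_* inputs is ignored here):

    // imm 0 = nearest, ties to even    -> F32/F64 Nearest
    // imm 1 = toward negative infinity -> Floor
    // imm 2 = toward positive infinity -> Ceil
    // imm 3 = toward zero              -> Trunc, and the I32/I64 Trunc[SU]F32/F64 paths
    fn f64_floor(x: f64) -> f64 { x.floor() }               // roundsd xmm, xmm, 1
    fn f64_ceil(x: f64) -> f64 { x.ceil() }                 // roundsd xmm, xmm, 2
    fn f64_trunc(x: f64) -> f64 { x.trunc() }               // roundsd xmm, xmm, 3
    fn i32_trunc_s_f32(x: f32) -> i32 { x.trunc() as i32 }  // roundss ..., 3 + cvtss2si
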
--- lib/dynasm-backend/src/codegen_x64.rs | 192 ++++++++++++++++++++++++++ 1 file changed, 192 insertions(+) diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index 2920df70054..8c4d26e1e95 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -3952,6 +3952,102 @@ impl FunctionCodeGenerator for X64FunctionCode { WpType::F32 )?; } + Operator::F32Nearest => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, value_stack, reg| { + dynasm!( + assembler + ; movd xmm1, Rd(reg as u8) + ; roundss xmm1, xmm1, 0 + ; movd Rd(reg as u8), xmm1 + ); + }, + WpType::F32, + WpType::F32 + )?; + } + Operator::F32Floor => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, value_stack, reg| { + dynasm!( + assembler + ; movd xmm1, Rd(reg as u8) + ; roundss xmm1, xmm1, 1 + ; movd Rd(reg as u8), xmm1 + ); + }, + WpType::F32, + WpType::F32 + )?; + } + Operator::F32Ceil => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, value_stack, reg| { + dynasm!( + assembler + ; movd xmm1, Rd(reg as u8) + ; roundss xmm1, xmm1, 2 + ; movd Rd(reg as u8), xmm1 + ); + }, + WpType::F32, + WpType::F32 + )?; + } + Operator::F32Trunc => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, value_stack, reg| { + dynasm!( + assembler + ; movd xmm1, Rd(reg as u8) + ; roundss xmm1, xmm1, 3 + ; movd Rd(reg as u8), xmm1 + ); + }, + WpType::F32, + WpType::F32 + )?; + } + Operator::I32TruncUF32 | Operator::I32TruncSF32 => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, value_stack, reg| { + dynasm!( + assembler + ; movd xmm1, Rd(reg as u8) + ; roundss xmm1, xmm1, 3 + ; cvtss2si Rd(reg as u8), xmm1 + ); + }, + WpType::F32, + WpType::I32 + )?; + } + Operator::I64TruncUF32 | Operator::I64TruncSF32 => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, value_stack, reg| { + dynasm!( + assembler + ; movd xmm1, Rd(reg as u8) + ; roundss xmm1, xmm1, 3 + ; cvtss2si Rq(reg as u8), xmm1 + ); + }, + WpType::F32, + WpType::I64 + )?; + } Operator::F64Add => { Self::emit_binop( assembler, @@ -4233,6 +4329,102 @@ impl FunctionCodeGenerator for X64FunctionCode { WpType::F64 )?; } + Operator::F64Nearest => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, value_stack, reg| { + dynasm!( + assembler + ; movq xmm1, Rq(reg as u8) + ; roundsd xmm1, xmm1, 0 + ; movq Rq(reg as u8), xmm1 + ); + }, + WpType::F64, + WpType::F64 + )?; + } + Operator::F64Floor => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, value_stack, reg| { + dynasm!( + assembler + ; movq xmm1, Rq(reg as u8) + ; roundsd xmm1, xmm1, 1 + ; movq Rq(reg as u8), xmm1 + ); + }, + WpType::F64, + WpType::F64 + )?; + } + Operator::F64Ceil => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, value_stack, reg| { + dynasm!( + assembler + ; movq xmm1, Rq(reg as u8) + ; roundsd xmm1, xmm1, 2 + ; movq Rq(reg as u8), xmm1 + ); + }, + WpType::F64, + WpType::F64 + )?; + } + Operator::F64Trunc => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, value_stack, reg| { + dynasm!( + assembler + ; movq xmm1, Rq(reg as u8) + ; roundsd xmm1, xmm1, 3 + ; movq Rq(reg as u8), xmm1 + ); + }, + WpType::F64, + WpType::F64 + )?; + } + Operator::I32TruncUF64 | Operator::I32TruncSF64 => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, value_stack, reg| { + dynasm!( + assembler + ; movq xmm1, Rq(reg as u8) 
+ ; roundsd xmm1, xmm1, 3 + ; cvtsd2si Rd(reg as u8), xmm1 + ); + }, + WpType::F64, + WpType::I32 + )?; + } + Operator::I64TruncUF64 | Operator::I64TruncSF64 => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, value_stack, reg| { + dynasm!( + assembler + ; movq xmm1, Rq(reg as u8) + ; roundsd xmm1, xmm1, 3 + ; cvtsd2si Rq(reg as u8), xmm1 + ); + }, + WpType::F64, + WpType::I64 + )?; + } Operator::Nop => {} Operator::MemorySize { reserved } => { let memory_index = MemoryIndex::new(reserved as usize); From 53a8fca4d53262c0d19244c6c13736b4edbd07ca Mon Sep 17 00:00:00 2001 From: Syrus Date: Sat, 16 Mar 2019 11:52:11 -0700 Subject: [PATCH 067/100] Make wasmer executable work with dynasm --- Makefile | 2 +- lib/dynasm-backend/src/lib.rs | 6 ++++-- src/bin/wasmer.rs | 5 +++-- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/Makefile b/Makefile index 0c9a0692d7a..a51d667894a 100644 --- a/Makefile +++ b/Makefile @@ -48,7 +48,7 @@ test-emscripten: release: # If you are in OS-X, you will need mingw-w64 for cross compiling to windows # brew install mingw-w64 - cargo build --release + cargo +nightly build --release debug-release: cargo build --release --features debug diff --git a/lib/dynasm-backend/src/lib.rs b/lib/dynasm-backend/src/lib.rs index 00602782d79..3695b98a4d3 100644 --- a/lib/dynasm-backend/src/lib.rs +++ b/lib/dynasm-backend/src/lib.rs @@ -38,7 +38,8 @@ impl CacheGen for Placeholder { &self, module: &ModuleInner, ) -> Result<(Box, Box<[u8]>, Memory), CacheError> { - unimplemented!() + // unimplemented!() + Err(CacheError::Unknown("the dynasm backend doesn't support caching yet".to_string())) } } @@ -73,7 +74,8 @@ impl Compiler for SinglePassCompiler { } unsafe fn from_cache(&self, _artifact: Artifact, _: Token) -> Result { - unimplemented!("the dynasm backend doesn't support caching yet") + Err(CacheError::Unknown("the dynasm backend doesn't support caching yet".to_string())) + // unimplemented!("the dynasm backend doesn't support caching yet") } } diff --git a/src/bin/wasmer.rs b/src/bin/wasmer.rs index e36e99cb613..ec051105e63 100644 --- a/src/bin/wasmer.rs +++ b/src/bin/wasmer.rs @@ -128,8 +128,9 @@ fn execute_wasm(options: &Run) -> Result<(), String> { let module = webassembly::compile(&wasm_binary[..]) .map_err(|e| format!("Can't compile module: {:?}", e))?; - // We save the module into a cache file - cache.store(hash, module.clone()).unwrap(); + // We try to save the module into a cache file + cache.store(hash, module.clone()).unwrap_or_default(); + module } }; From 81af8cfa9c563ec8683dd11642f94d134ff80cfc Mon Sep 17 00:00:00 2001 From: losfair Date: Sun, 17 Mar 2019 03:07:27 +0800 Subject: [PATCH 068/100] Fixed several bugs in the single-pass backend and implemented a runtime resolver that allows to call managed functions with System V cc. 
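The centerpiece below is `emit_managed_call_trampoline`. Under the System V AMD64 convention the first integer arguments arrive in rdi, rsi, rdx, rcx, r8, r9 and the rest on the stack at [rbp + 16 + 8*k]; the trampoline keeps rdi (the vm::Ctx pointer), spills the remaining arguments into a buffer with `push`, and forwards the buffer, its length in bytes, the target pointer, the linear-memory base and the ctx to CALL_WASM. A rough Rust sketch of that call shape (names and signatures here are illustrative, not taken from the patch):

    // Sketch only: what one generated trampoline effectively does. The real
    // code builds the buffer on the machine stack and reads the memory base
    // out of vm::Ctx before calling through CALL_WASM.
    unsafe fn managed_call_shim(
        call_wasm: unsafe extern "C" fn(*const u64, usize, *const u8, usize, *mut u8) -> i64,
        target: *const u8,   // entry point of the managed function
        vmctx: *mut u8,      // the System V caller's first argument (rdi)
        memory_base: usize,  // base of the single supported linear memory, or 0
        args: &[u64],        // the caller's remaining integer arguments, in order
    ) -> i64 {
        let buf: Vec<u64> = args.to_vec();
        // CALL_WASM(params_ptr, params_len_in_bytes, target, memory_base, vmctx)
        call_wasm(buf.as_ptr(), buf.len() * 8, target, memory_base, vmctx)
    }

`X64RuntimeResolver` then hands out pointers to these trampolines rather than to the raw function bodies, so `FuncResolver::get` returns something a native System V caller can invoke directly.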
--- lib/dynasm-backend/src/codegen.rs | 6 +- lib/dynasm-backend/src/codegen_x64.rs | 215 +++++++++++++++++++++----- lib/dynasm-backend/src/lib.rs | 4 +- lib/dynasm-backend/src/parse.rs | 5 +- 4 files changed, 181 insertions(+), 49 deletions(-) diff --git a/lib/dynasm-backend/src/codegen.rs b/lib/dynasm-backend/src/codegen.rs index f9bee801dad..5bb299cb105 100644 --- a/lib/dynasm-backend/src/codegen.rs +++ b/lib/dynasm-backend/src/codegen.rs @@ -1,6 +1,6 @@ use std::sync::Arc; use wasmer_runtime_core::{ - backend::ProtectedCaller, + backend::{ProtectedCaller, FuncResolver}, structures::Map, types::{FuncIndex, FuncSig, SigIndex}, units::Pages, @@ -8,9 +8,9 @@ use wasmer_runtime_core::{ }; use wasmparser::{Operator, Type as WpType}; -pub trait ModuleCodeGenerator { +pub trait ModuleCodeGenerator { fn next_function(&mut self) -> Result<&mut FCG, CodegenError>; - fn finalize(self) -> Result; + fn finalize(self, module_info: &ModuleInfo) -> Result<(PC, FR), CodegenError>; fn feed_signatures( &mut self, signatures: Map, diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index 8c4d26e1e95..ed7cbf0aa22 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -21,7 +21,7 @@ use wasmer_runtime_core::{ }, memory::MemoryType, units::Pages, - vm::{self, ImportBacking, LocalGlobal, LocalTable}, + vm::{self, ImportBacking, LocalGlobal, LocalTable, LocalMemory}, }; use wasmparser::{Operator, Type as WpType}; @@ -90,39 +90,33 @@ lazy_static! { ; mov r13, rdx ; mov r12, rcx - ; mov rdi, r13 - ; cmp r15, r14 - ; je >stack_ready - ; mov rdi, [r15] - ; add r15, 8 - - ; mov rsi, r13 - ; cmp r15, r14 - ; je >stack_ready - ; mov rsi, [r15] - ; add r15, 8 - - ; mov rdx, r13 - ; cmp r15, r14 - ; je >stack_ready - ; mov rdx, [r15] - ; add r15, 8 - - ; mov rcx, r13 - ; cmp r15, r14 - ; je >stack_ready - ; mov rcx, [r15] - ; add r15, 8 - - ; mov r8, r13 - ; cmp r15, r14 - ; je >stack_ready - ; mov r8, [r15] - ; add r15, 8 - - ; mov r9, r13 - ; cmp r15, r14 - ; je >stack_ready + ; mov rdi, r13 // ctx + + ; sub r14, 8 + ; cmp r14, r15 + ; jb >stack_ready + ; mov rsi, [r14] + + ; sub r14, 8 + ; cmp r14, r15 + ; jb >stack_ready + ; mov rdx, [r14] + + ; sub r14, 8 + ; cmp r14, r15 + ; jb >stack_ready + ; mov rcx, [r14] + + ; sub r14, 8 + ; cmp r14, r15 + ; jb >stack_ready + ; mov r8, [r14] + + ; sub r14, 8 + ; cmp r14, r15 + ; jb >stack_ready + ; mov r9, [r14] + ; ud2 // FIXME ; stack_ready: @@ -255,6 +249,7 @@ pub struct X64FunctionCode { enum FuncPtrInner {} #[repr(transparent)] +#[derive(Copy, Clone, Debug)] struct FuncPtr(*const FuncPtrInner); unsafe impl Send for FuncPtr {} unsafe impl Sync for FuncPtr {} @@ -269,15 +264,44 @@ pub struct X64ExecutionContext { func_import_count: usize, } -impl FuncResolver for X64ExecutionContext { +pub struct X64RuntimeResolver { + code: ExecutableBuffer, + local_pointers: Vec, +} + +impl X64ExecutionContext { + fn get_runtime_resolver(&self, module_info: &ModuleInfo) -> Result { + let mut assembler = Assembler::new().unwrap(); + let mut offsets: Vec = vec! 
[]; + + for i in self.func_import_count..self.function_pointers.len() { + offsets.push(assembler.offset()); + X64FunctionCode::emit_managed_call_trampoline( + &mut assembler, + module_info, + self.function_pointers[i], + self.signatures[self.function_signatures[FuncIndex::new(i)]].params().len(), + )?; + } + + let code = assembler.finalize().unwrap(); + let local_pointers: Vec = offsets.iter().map(|x| FuncPtr(code.ptr(*x) as _)).collect(); + + Ok(X64RuntimeResolver { + code: code, + local_pointers: local_pointers, + }) + } +} + +impl FuncResolver for X64RuntimeResolver { fn get( &self, _module: &ModuleInner, _local_func_index: LocalFuncIndex, ) -> Option> { NonNull::new( - self.function_pointers[_local_func_index.index() as usize + self.func_import_count].0 - as *mut vm::Func, + self.local_pointers[_local_func_index.index() as usize].0 as *mut vm::Func, ) } } @@ -499,7 +523,7 @@ impl X64ModuleCodeGenerator { } } -impl ModuleCodeGenerator for X64ModuleCodeGenerator { +impl ModuleCodeGenerator for X64ModuleCodeGenerator { fn next_function(&mut self) -> Result<&mut X64FunctionCode, CodegenError> { let (mut assembler, mut function_labels, br_table_data) = match self.functions.last_mut() { Some(x) => ( @@ -549,7 +573,7 @@ impl ModuleCodeGenerator for X64ModuleCode Ok(self.functions.last_mut().unwrap()) } - fn finalize(mut self) -> Result { + fn finalize(mut self, module_info: &ModuleInfo) -> Result<(X64ExecutionContext, X64RuntimeResolver), CodegenError> { let (mut assembler, mut br_table_data) = match self.functions.last_mut() { Some(x) => (x.assembler.take().unwrap(), x.br_table_data.take().unwrap()), None => { @@ -593,7 +617,7 @@ impl ModuleCodeGenerator for X64ModuleCode out_labels.push(FuncPtr(output.ptr(*offset) as _)); } - Ok(X64ExecutionContext { + let ctx = X64ExecutionContext { code: output, functions: self.functions, br_table_data: br_table_data, @@ -615,7 +639,10 @@ impl ModuleCodeGenerator for X64ModuleCode }) } }, - }) + }; + let resolver = ctx.get_runtime_resolver(module_info)?; + + Ok((ctx, resolver)) } fn feed_signatures( @@ -1307,6 +1334,100 @@ impl X64FunctionCode { Ok(()) } + fn emit_managed_call_trampoline(assembler: &mut Assembler, info: &ModuleInfo, target: FuncPtr, num_params: usize) -> Result<(), CodegenError> { + dynasm!( + assembler + ; push rbp + ; mov rbp, rsp + ); + + for i in 0..num_params { + match i { + i if i < 5 => { + let reg = match i { + 0 => Register::RSI, + 1 => Register::RDX, + 2 => Register::RCX, + 3 => Register::R8, + 4 => Register::R9, + _ => unreachable!(), + }; + dynasm!( + assembler + ; push Rq(reg as u8) + ); + } + i => { + let offset = (i - 5) * 8; + dynasm!( + assembler + ; mov rax, [rbp + (16 + offset) as i32] + ; push rax + ); + } + } + } + + + dynasm!( + assembler + ; mov r8, rdi // vmctx + ; mov rdx, QWORD (target.0 as usize as i64) + ; mov rsi, QWORD (num_params * 8) as i64 + ; mov rdi, rsp + ); + + let has_memory = if info.memories.len() > 0 { + if info.memories.len() != 1 || info.imported_memories.len() != 0 { + return Err(CodegenError { + message: "only one linear memory is supported", + }); + } + dynasm!( + assembler + ; mov rcx, r8 => vm::Ctx.memories + ); + true + } else if info.imported_memories.len() > 0 { + if info.memories.len() != 0 || info.imported_memories.len() != 1 { + return Err(CodegenError{ + message: "only one linear memory is supported", + }); + } + dynasm!( + assembler + ; mov rcx, r8 => vm::Ctx.imported_memories + ); + true + } else { + false + }; + + if has_memory { + dynasm!( + assembler + ; mov rcx, [rcx] + ; mov 
rcx, rcx => LocalMemory.base + ); + } else { + dynasm!( + assembler + ; mov rcx, 0 + ); + } + + dynasm!( + assembler + ; mov rax, QWORD (*CALL_WASM as usize as i64) + ; call rax + ; mov rsp, rbp + ; pop rbp + ; ret + ); + + Ok(()) + } + fn emit_native_call_trampoline( assembler: &mut Assembler, target: unsafe extern "C" fn( @@ -4565,7 +4686,17 @@ unsafe extern "C" fn invoke_import( let vmctx: &mut vm::Ctx = &mut *vmctx; let import = (*vmctx.imported_funcs.offset(import_id as isize)).func; - return 0; // TODO: Fix this. + let n_args = (stack_base as usize - stack_top as usize) / 8; + + /*println!("Calling import: {:?} with vmctx = {:?}, n_args = {}", + import, + vmctx as *mut _, + n_args, + ); + + for i in 0..n_args { + println!("Arg: {:?}", * ((stack_top as usize + i * 8) as *const *const ())); + }*/ CONSTRUCT_STACK_AND_CALL_NATIVE(stack_top, stack_base, vmctx, import) } diff --git a/lib/dynasm-backend/src/lib.rs b/lib/dynasm-backend/src/lib.rs index 00602782d79..7e8e17cb843 100644 --- a/lib/dynasm-backend/src/lib.rs +++ b/lib/dynasm-backend/src/lib.rs @@ -63,10 +63,10 @@ impl Compiler for SinglePassCompiler { fn compile(&self, wasm: &[u8], _: Token) -> CompileResult { let mut mcg = codegen_x64::X64ModuleCodeGenerator::new(); let info = parse::read_module(wasm, Backend::Dynasm, &mut mcg)?; - let ec = mcg.finalize()?; + let (ec, resolver) = mcg.finalize(&info)?; Ok(ModuleInner { cache_gen: Box::new(Placeholder), - func_resolver: Box::new(Placeholder), + func_resolver: Box::new(resolver), protected_caller: Box::new(ec), info: info, }) diff --git a/lib/dynasm-backend/src/parse.rs b/lib/dynasm-backend/src/parse.rs index e4909a96913..806517d4677 100644 --- a/lib/dynasm-backend/src/parse.rs +++ b/lib/dynasm-backend/src/parse.rs @@ -1,7 +1,7 @@ use crate::codegen::{CodegenError, FunctionCodeGenerator, ModuleCodeGenerator}; use std::sync::Arc; use wasmer_runtime_core::{ - backend::{Backend, ProtectedCaller}, + backend::{Backend, ProtectedCaller, FuncResolver}, module::{ DataInitializer, ExportIndex, ImportName, ModuleInfo, StringTable, StringTableBuilder, TableInitializer, @@ -39,9 +39,10 @@ impl From for LoadError { } pub fn read_module< - MCG: ModuleCodeGenerator, + MCG: ModuleCodeGenerator, FCG: FunctionCodeGenerator, PC: ProtectedCaller, + FR: FuncResolver, >( wasm: &[u8], backend: Backend, From f8fe9990158f89f632b739d7d011d4009c30efe0 Mon Sep 17 00:00:00 2001 From: losfair Date: Sun, 17 Mar 2019 10:27:14 +0800 Subject: [PATCH 069/100] Implemented protected call and floating point traps; passing all spectests! 
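Two mechanisms land below. First, `protect_unix::call_protected` installs handlers for SIGFPE, SIGILL, SIGSEGV and SIGBUS and pairs them with setjmp/longjmp, so a hardware trap raised inside generated code (for example the `ud2` emitted on invalid conversions) unwinds back to the host as a RuntimeError::Trap instead of aborting the process. Second, `emit_f32_int_conv_check` and `emit_f64_int_conv_check` guard every float-to-integer truncation. In plain Rust the emitted guard behaves roughly like this (sketch with illustrative names; `lower` and `upper` are the constants each call site passes, e.g. -2147483904.0 and 2147483648.0 for `I32TruncSF32`):

    // Sketch of the emitted guard, not code from the patch. The assembly does
    // the same three comparisons (vcmpltss, vcmpgtss, and a self-compare that
    // fails only for NaN) and executes `ud2` on any failing case.
    fn checked_trunc_f32_to_i32(x: f32, lower: f32, upper: f32) -> Result<i32, &'static str> {
        if x < lower {
            return Err("underflow in float-to-int conversion");
        }
        if x > upper {
            return Err("overflow in float-to-int conversion");
        }
        if x.is_nan() {
            // NaN compares unequal even to itself, so it falls through to here.
            return Err("invalid (NaN) float-to-int conversion");
        }
        Ok(x.trunc() as i32) // roundss imm=3, then cvtss2si
    }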
--- lib/dynasm-backend/Cargo.toml | 2 + lib/dynasm-backend/src/codegen.rs | 1 + lib/dynasm-backend/src/codegen_x64.rs | 274 +++++++++++++++++++++++-- lib/dynasm-backend/src/lib.rs | 7 + lib/dynasm-backend/src/parse.rs | 27 +++ lib/dynasm-backend/src/protect_unix.rs | 202 ++++++++++++++++++ lib/runtime-core/src/types.rs | 4 +- 7 files changed, 497 insertions(+), 20 deletions(-) create mode 100644 lib/dynasm-backend/src/protect_unix.rs diff --git a/lib/dynasm-backend/Cargo.toml b/lib/dynasm-backend/Cargo.toml index 26ff42a8dec..428a90f4760 100644 --- a/lib/dynasm-backend/Cargo.toml +++ b/lib/dynasm-backend/Cargo.toml @@ -14,3 +14,5 @@ dynasm = "0.3.0" dynasmrt = "0.3.1" lazy_static = "1.2.0" byteorder = "1" +nix = "0.13.0" +libc = "0.2.49" diff --git a/lib/dynasm-backend/src/codegen.rs b/lib/dynasm-backend/src/codegen.rs index 5bb299cb105..e6b083d050e 100644 --- a/lib/dynasm-backend/src/codegen.rs +++ b/lib/dynasm-backend/src/codegen.rs @@ -9,6 +9,7 @@ use wasmer_runtime_core::{ use wasmparser::{Operator, Type as WpType}; pub trait ModuleCodeGenerator { + fn check_precondition(&mut self, module_info: &ModuleInfo) -> Result<(), CodegenError>; fn next_function(&mut self) -> Result<&mut FCG, CodegenError>; fn finalize(self, module_info: &ModuleInfo) -> Result<(PC, FR), CodegenError>; fn feed_signatures( diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index ed7cbf0aa22..ca3ebfcaa12 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -24,6 +24,7 @@ use wasmer_runtime_core::{ vm::{self, ImportBacking, LocalGlobal, LocalTable, LocalMemory}, }; use wasmparser::{Operator, Type as WpType}; +use crate::protect_unix; thread_local! { static CURRENT_EXECUTION_CONTEXT: RefCell> = RefCell::new(Vec::new()); @@ -384,17 +385,21 @@ impl ProtectedCaller for X64ExecutionContext { CURRENT_EXECUTION_CONTEXT.with(|x| x.borrow_mut().push(self)); let ret = unsafe { - CALL_WASM( - param_buf.as_ptr(), - param_buf.len(), - ptr, - memory_base, - _vmctx, - ) + protect_unix::call_protected(|| { + CALL_WASM( + param_buf.as_ptr(), + param_buf.len(), + ptr, + memory_base, + _vmctx, + ) + }) }; CURRENT_EXECUTION_CONTEXT.with(|x| x.borrow_mut().pop().unwrap()); + let ret = ret?; + Ok(if let Some(ty) = return_ty { vec![match ty { WpType::I32 => Value::I32(ret as i32), @@ -524,6 +529,19 @@ impl X64ModuleCodeGenerator { } impl ModuleCodeGenerator for X64ModuleCodeGenerator { + fn check_precondition(&mut self, module_info: &ModuleInfo) -> Result<(), CodegenError> { + for mem in module_info.memories.iter().map(|(_, v)| v).chain(module_info.imported_memories.iter().map(|(_, v)| &v.1)) { + match mem.memory_type() { + MemoryType::Dynamic => return Err(CodegenError { + message: "dynamic memory isn't supported yet" + }), + _ => {} + } + } + + Ok(()) + } + fn next_function(&mut self) -> Result<&mut X64FunctionCode, CodegenError> { let (mut assembler, mut function_labels, br_table_data) = match self.functions.last_mut() { Some(x) => ( @@ -1428,6 +1446,98 @@ impl X64FunctionCode { Ok(()) } + fn emit_f32_int_conv_check( + assembler: &mut Assembler, + reg: Register, + lower_bound: f32, + upper_bound: f32, + ) { + let lower_bound = f32::to_bits(lower_bound); + let upper_bound = f32::to_bits(upper_bound); + + dynasm!( + assembler + ; movq xmm5, r15 + + // underflow + ; movd xmm1, Rd(reg as u8) + ; mov r15d, lower_bound as i32 + ; movd xmm2, r15d + ; vcmpltss xmm0, xmm1, xmm2 + ; movd r15d, xmm0 + ; cmp r15d, 1 + ; je >trap + + // overflow + ; mov r15d, 
upper_bound as i32 + ; movd xmm2, r15d + ; vcmpgtss xmm0, xmm1, xmm2 + ; movd r15d, xmm0 + ; cmp r15d, 1 + ; je >trap + + // NaN + ; vcmpeqss xmm0, xmm1, xmm1 + ; movd r15d, xmm0 + ; cmp r15d, 0 + ; je >trap + + ; movq r15, xmm5 + ; jmp >ok + + ; trap: + ; ud2 + + ; ok: + ); + } + + fn emit_f64_int_conv_check( + assembler: &mut Assembler, + reg: Register, + lower_bound: f64, + upper_bound: f64, + ) { + let lower_bound = f64::to_bits(lower_bound); + let upper_bound = f64::to_bits(upper_bound); + + dynasm!( + assembler + ; movq xmm5, r15 + + // underflow + ; movq xmm1, Rq(reg as u8) + ; mov r15, QWORD lower_bound as i64 + ; movq xmm2, r15 + ; vcmpltsd xmm0, xmm1, xmm2 + ; movd r15d, xmm0 + ; cmp r15d, 1 + ; je >trap + + // overflow + ; mov r15, QWORD upper_bound as i64 + ; movq xmm2, r15 + ; vcmpgtsd xmm0, xmm1, xmm2 + ; movd r15d, xmm0 + ; cmp r15d, 1 + ; je >trap + + // NaN + ; vcmpeqsd xmm0, xmm1, xmm1 + ; movd r15d, xmm0 + ; cmp r15d, 0 + ; je >trap + + ; movq r15, xmm5 + ; jmp >ok + + ; trap: + ; ud2 + + ; ok: + ); + } + fn emit_native_call_trampoline( assembler: &mut Assembler, target: unsafe extern "C" fn( @@ -4137,11 +4247,39 @@ impl FunctionCodeGenerator for X64FunctionCode { WpType::F32 )?; } - Operator::I32TruncUF32 | Operator::I32TruncSF32 => { + Operator::I32TruncUF32 => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, value_stack, reg| { + Self::emit_f32_int_conv_check( + assembler, + reg, + -1.0, + 4294967296.0, + ); + dynasm!( + assembler + ; movd xmm1, Rd(reg as u8) + ; roundss xmm1, xmm1, 3 + ; cvtss2si Rd(reg as u8), xmm1 + ); + }, + WpType::F32, + WpType::I32 + )?; + } + Operator::I32TruncSF32 => { Self::emit_unop( assembler, &mut self.value_stack, |assembler, value_stack, reg| { + Self::emit_f32_int_conv_check( + assembler, + reg, + -2147483904.0, + 2147483648.0 + ); dynasm!( assembler ; movd xmm1, Rd(reg as u8) @@ -4153,11 +4291,39 @@ impl FunctionCodeGenerator for X64FunctionCode { WpType::I32 )?; } - Operator::I64TruncUF32 | Operator::I64TruncSF32 => { + Operator::I64TruncUF32 => { Self::emit_unop( assembler, &mut self.value_stack, |assembler, value_stack, reg| { + Self::emit_f32_int_conv_check( + assembler, + reg, + -1.0, + 18446744073709551616.0, + ); + dynasm!( + assembler + ; movd xmm1, Rd(reg as u8) + ; roundss xmm1, xmm1, 3 + ; cvtss2si Rq(reg as u8), xmm1 + ); + }, + WpType::F32, + WpType::I64 + )?; + } + Operator::I64TruncSF32 => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, value_stack, reg| { + Self::emit_f32_int_conv_check( + assembler, + reg, + -9223373136366403584.0, + 9223372036854775808.0, + ); dynasm!( assembler ; movd xmm1, Rd(reg as u8) @@ -4514,11 +4680,18 @@ impl FunctionCodeGenerator for X64FunctionCode { WpType::F64 )?; } - Operator::I32TruncUF64 | Operator::I32TruncSF64 => { + Operator::I32TruncUF64 => { Self::emit_unop( assembler, &mut self.value_stack, |assembler, value_stack, reg| { + Self::emit_f64_int_conv_check( + assembler, + reg, + -1.0, + 4294967296.0, + ); + dynasm!( assembler ; movq xmm1, Rq(reg as u8) @@ -4530,11 +4703,64 @@ impl FunctionCodeGenerator for X64FunctionCode { WpType::I32 )?; } - Operator::I64TruncUF64 | Operator::I64TruncSF64 => { + Operator::I32TruncSF64 => { Self::emit_unop( assembler, &mut self.value_stack, |assembler, value_stack, reg| { + Self::emit_f64_int_conv_check( + assembler, + reg, + -2147483649.0, + 2147483648.0, + ); + + dynasm!( + assembler + ; movq xmm1, Rq(reg as u8) + ; roundsd xmm1, xmm1, 3 + ; cvtsd2si Rd(reg as u8), xmm1 + ); + }, + WpType::F64, + 
WpType::I32 + )?; + } + Operator::I64TruncUF64 => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, value_stack, reg| { + Self::emit_f64_int_conv_check( + assembler, + reg, + -1.0, + 18446744073709551616.0, + ); + + dynasm!( + assembler + ; movq xmm1, Rq(reg as u8) + ; roundsd xmm1, xmm1, 3 + ; cvtsd2si Rq(reg as u8), xmm1 + ); + }, + WpType::F64, + WpType::I64 + )?; + } + Operator::I64TruncSF64 => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, value_stack, reg| { + Self::emit_f64_int_conv_check( + assembler, + reg, + -9223372036854777856.0, + 9223372036854775808.0, + ); + dynasm!( assembler ; movq xmm1, Rq(reg as u8) @@ -4553,7 +4779,8 @@ impl FunctionCodeGenerator for X64FunctionCode { LocalOrImport::Local(local_mem_index) => { let mem_desc = &module_info.memories[local_mem_index]; match mem_desc.memory_type() { - MemoryType::Dynamic => self.native_trampolines.memory_size_dynamic_local, + //MemoryType::Dynamic => self.native_trampolines.memory_size_dynamic_local, + MemoryType::Dynamic => unimplemented!(), MemoryType::Static => self.native_trampolines.memory_size_static_local, MemoryType::SharedStatic => self.native_trampolines.memory_size_shared_local, } @@ -4561,7 +4788,8 @@ impl FunctionCodeGenerator for X64FunctionCode { LocalOrImport::Import(import_mem_index) => { let mem_desc = &module_info.imported_memories[import_mem_index].1; match mem_desc.memory_type() { - MemoryType::Dynamic => self.native_trampolines.memory_size_dynamic_import, + //MemoryType::Dynamic => self.native_trampolines.memory_size_dynamic_import, + MemoryType::Dynamic => unimplemented!(), MemoryType::Static => self.native_trampolines.memory_size_static_import, MemoryType::SharedStatic => self.native_trampolines.memory_size_shared_import, } @@ -4581,7 +4809,8 @@ impl FunctionCodeGenerator for X64FunctionCode { LocalOrImport::Local(local_mem_index) => { let mem_desc = &module_info.memories[local_mem_index]; match mem_desc.memory_type() { - MemoryType::Dynamic => self.native_trampolines.memory_grow_dynamic_local, + //MemoryType::Dynamic => self.native_trampolines.memory_grow_dynamic_local, + MemoryType::Dynamic => unimplemented!(), MemoryType::Static => self.native_trampolines.memory_grow_static_local, MemoryType::SharedStatic => self.native_trampolines.memory_grow_shared_local, } @@ -4589,7 +4818,8 @@ impl FunctionCodeGenerator for X64FunctionCode { LocalOrImport::Import(import_mem_index) => { let mem_desc = &module_info.imported_memories[import_mem_index].1; match mem_desc.memory_type() { - MemoryType::Dynamic => self.native_trampolines.memory_grow_dynamic_import, + //MemoryType::Dynamic => self.native_trampolines.memory_grow_dynamic_import, + MemoryType::Dynamic => unimplemented!(), MemoryType::Static => self.native_trampolines.memory_grow_static_import, MemoryType::SharedStatic => self.native_trampolines.memory_grow_shared_import, } @@ -4725,13 +4955,20 @@ unsafe extern "C" fn call_indirect( CallIndirectLocalOrImport::Import => &*(*(*vmctx).imported_tables), } ; if elem_index >= table.count as usize { - panic!("element index out of bounds"); + eprintln!("element index out of bounds"); + unsafe { protect_unix::trigger_trap(); } } let anyfunc = &*(table.base as *mut vm::Anyfunc).offset(elem_index as isize); let ctx: &X64ExecutionContext = &*CURRENT_EXECUTION_CONTEXT.with(|x| *x.borrow().last().unwrap()); - let func_index = anyfunc.func_index.unwrap(); + let func_index = match anyfunc.func_index { + Some(x) => x, + None => { + eprintln!("empty table entry"); + unsafe { 
protect_unix::trigger_trap(); } + } + }; /*println!( "SIG INDEX = {}, FUNC INDEX = {:?}, ELEM INDEX = {}", @@ -4741,7 +4978,8 @@ unsafe extern "C" fn call_indirect( if ctx.signatures[SigIndex::new(sig_index)] != ctx.signatures[ctx.function_signatures[func_index]] { - panic!("signature mismatch"); + eprintln!("signature mismatch"); + unsafe { protect_unix::trigger_trap(); } } let func = ctx.function_pointers[func_index.index() as usize].0; diff --git a/lib/dynasm-backend/src/lib.rs b/lib/dynasm-backend/src/lib.rs index b6b0354041a..080cafca769 100644 --- a/lib/dynasm-backend/src/lib.rs +++ b/lib/dynasm-backend/src/lib.rs @@ -1,5 +1,11 @@ #![feature(proc_macro_hygiene)] +#[cfg(not(any( + all(target_os = "macos", target_arch = "x86_64"), + all(target_os = "linux", target_arch = "x86_64"), +)))] +compile_error!("This crate doesn't yet support compiling on operating systems other than linux and macos and architectures other than x86_64"); + #[macro_use] extern crate dynasmrt; @@ -15,6 +21,7 @@ mod codegen; mod codegen_x64; mod parse; mod stack; +mod protect_unix; use crate::codegen::{CodegenError, ModuleCodeGenerator}; use crate::parse::LoadError; diff --git a/lib/dynasm-backend/src/parse.rs b/lib/dynasm-backend/src/parse.rs index 806517d4677..c9da4a0cd23 100644 --- a/lib/dynasm-backend/src/parse.rs +++ b/lib/dynasm-backend/src/parse.rs @@ -18,6 +18,7 @@ use wasmparser::{ BinaryReaderError, CodeSectionReader, Data, DataKind, Element, ElementKind, Export, ExternalKind, FuncType, Import, ImportSectionEntryType, InitExpr, ModuleReader, Operator, SectionCode, Type as WpType, + WasmDecoder, }; #[derive(Debug)] @@ -38,6 +39,30 @@ impl From for LoadError { } } +fn validate(bytes: &[u8]) -> Result<(), LoadError> { + let mut parser = wasmparser::ValidatingParser::new( + bytes, + Some(wasmparser::ValidatingParserConfig { + operator_config: wasmparser::OperatorValidatorConfig { + enable_threads: false, + enable_reference_types: false, + enable_simd: false, + enable_bulk_memory: false, + }, + mutable_global_imports: false, + }), + ); + + loop { + let state = parser.read(); + match *state { + wasmparser::ParserState::EndWasm => break Ok(()), + wasmparser::ParserState::Error(err) => Err(LoadError::Parse(err))?, + _ => {} + } + } +} + pub fn read_module< MCG: ModuleCodeGenerator, FCG: FunctionCodeGenerator, @@ -48,6 +73,7 @@ pub fn read_module< backend: Backend, mcg: &mut MCG, ) -> Result { + validate(wasm)?; let mut info = ModuleInfo { memories: Map::new(), globals: Map::new(), @@ -279,6 +305,7 @@ pub fn read_module< } .into()); } + mcg.check_precondition(&info)?; for i in 0..code_reader.get_count() { let item = code_reader.read()?; let mut fcg = mcg.next_function()?; diff --git a/lib/dynasm-backend/src/protect_unix.rs b/lib/dynasm-backend/src/protect_unix.rs new file mode 100644 index 00000000000..38fa5e954e2 --- /dev/null +++ b/lib/dynasm-backend/src/protect_unix.rs @@ -0,0 +1,202 @@ +//! Installing signal handlers allows us to handle traps and out-of-bounds memory +//! accesses that occur when runniing webassembly. +//! +//! This code is inspired by: https://github.com/pepyakin/wasmtime/commit/625a2b6c0815b21996e111da51b9664feb174622 +//! +//! When a WebAssembly module triggers any traps, we perform recovery here. +//! +//! This module uses TLS (thread-local storage) to track recovery information. Since the four signals we're handling +//! are very special, the async signal unsafety of Rust's TLS implementation generally does not affect the correctness here +//! 
unless you have memory unsafety elsewhere in your code. +//! +use libc::{c_int, c_void, siginfo_t}; +use nix::sys::signal::{ + sigaction, SaFlags, SigAction, SigHandler, SigSet, Signal, SIGBUS, SIGFPE, SIGILL, SIGSEGV, +}; +use std::cell::{Cell, UnsafeCell}; +use std::ptr; +use std::sync::Once; +use wasmer_runtime_core::error::{RuntimeError, RuntimeResult}; + +extern "C" fn signal_trap_handler( + signum: ::nix::libc::c_int, + siginfo: *mut siginfo_t, + ucontext: *mut c_void, +) { + unsafe { + do_unwind(signum, siginfo as _, ucontext); + } +} + +extern "C" { + pub fn setjmp(env: *mut c_void) -> c_int; + fn longjmp(env: *mut c_void, val: c_int) -> !; +} + +pub unsafe fn install_sighandler() { + let sa = SigAction::new( + SigHandler::SigAction(signal_trap_handler), + SaFlags::SA_ONSTACK, + SigSet::empty(), + ); + sigaction(SIGFPE, &sa).unwrap(); + sigaction(SIGILL, &sa).unwrap(); + sigaction(SIGSEGV, &sa).unwrap(); + sigaction(SIGBUS, &sa).unwrap(); +} + +const SETJMP_BUFFER_LEN: usize = 27; +pub static SIGHANDLER_INIT: Once = Once::new(); + +thread_local! { + pub static SETJMP_BUFFER: UnsafeCell<[c_int; SETJMP_BUFFER_LEN]> = UnsafeCell::new([0; SETJMP_BUFFER_LEN]); + pub static CAUGHT_ADDRESSES: Cell<(*const c_void, *const c_void)> = Cell::new((ptr::null(), ptr::null())); + pub static CURRENT_EXECUTABLE_BUFFER: Cell<*const c_void> = Cell::new(ptr::null()); +} + +pub unsafe fn trigger_trap() -> ! { + let jmp_buf = SETJMP_BUFFER.with(|buf| buf.get()); + + longjmp(jmp_buf as *mut c_void, 0) +} + +pub fn call_protected(f: impl FnOnce() -> T) -> RuntimeResult { + unsafe { + let jmp_buf = SETJMP_BUFFER.with(|buf| buf.get()); + let prev_jmp_buf = *jmp_buf; + + SIGHANDLER_INIT.call_once(|| { + install_sighandler(); + }); + + let signum = setjmp(jmp_buf as *mut _); + if signum != 0 { + *jmp_buf = prev_jmp_buf; + + let (faulting_addr, inst_ptr) = CAUGHT_ADDRESSES.with(|cell| cell.get()); + + let signal = match Signal::from_c_int(signum) { + Ok(SIGFPE) => "floating-point exception", + Ok(SIGILL) => "illegal instruction", + Ok(SIGSEGV) => "segmentation violation", + Ok(SIGBUS) => "bus error", + Err(_) => "error while getting the Signal", + _ => "unkown trapped signal", + }; + // When the trap-handler is fully implemented, this will return more information. + Err(RuntimeError::Trap { + msg: format!("unknown trap at {:p} - {}", faulting_addr, signal).into(), + } + .into()) + } else { + let ret = f(); // TODO: Switch stack? + *jmp_buf = prev_jmp_buf; + Ok(ret) + } + } +} + +/// Unwinds to last protected_call. +pub unsafe fn do_unwind(signum: i32, siginfo: *const c_void, ucontext: *const c_void) -> ! { + // Since do_unwind is only expected to get called from WebAssembly code which doesn't hold any host resources (locks etc.) + // itself, accessing TLS here is safe. In case any other code calls this, it often indicates a memory safety bug and you should + // temporarily disable the signal handlers to debug it. 
+ + let jmp_buf = SETJMP_BUFFER.with(|buf| buf.get()); + if *jmp_buf == [0; SETJMP_BUFFER_LEN] { + ::std::process::abort(); + } + + CAUGHT_ADDRESSES.with(|cell| cell.set(get_faulting_addr_and_ip(siginfo, ucontext))); + + longjmp(jmp_buf as *mut ::nix::libc::c_void, signum) +} + +#[cfg(all(target_os = "linux", target_arch = "x86_64"))] +unsafe fn get_faulting_addr_and_ip( + siginfo: *const c_void, + ucontext: *const c_void, +) -> (*const c_void, *const c_void) { + use libc::{ucontext_t, RIP}; + + #[allow(dead_code)] + #[repr(C)] + struct siginfo_t { + si_signo: i32, + si_errno: i32, + si_code: i32, + si_addr: u64, + // ... + } + + let siginfo = siginfo as *const siginfo_t; + let si_addr = (*siginfo).si_addr; + + let ucontext = ucontext as *const ucontext_t; + let rip = (*ucontext).uc_mcontext.gregs[RIP as usize]; + + (si_addr as _, rip as _) +} + +#[cfg(all(target_os = "macos", target_arch = "x86_64"))] +unsafe fn get_faulting_addr_and_ip( + siginfo: *const c_void, + ucontext: *const c_void, +) -> (*const c_void, *const c_void) { + #[allow(dead_code)] + #[repr(C)] + struct ucontext_t { + uc_onstack: u32, + uc_sigmask: u32, + uc_stack: libc::stack_t, + uc_link: *const ucontext_t, + uc_mcsize: u64, + uc_mcontext: *const mcontext_t, + } + #[repr(C)] + struct exception_state { + trapno: u16, + cpu: u16, + err: u32, + faultvaddr: u64, + } + #[repr(C)] + struct regs { + rax: u64, + rbx: u64, + rcx: u64, + rdx: u64, + rdi: u64, + rsi: u64, + rbp: u64, + rsp: u64, + r8: u64, + r9: u64, + r10: u64, + r11: u64, + r12: u64, + r13: u64, + r14: u64, + r15: u64, + rip: u64, + rflags: u64, + cs: u64, + fs: u64, + gs: u64, + } + #[allow(dead_code)] + #[repr(C)] + struct mcontext_t { + es: exception_state, + ss: regs, + // ... + } + + let siginfo = siginfo as *const siginfo_t; + let si_addr = (*siginfo).si_addr; + + let ucontext = ucontext as *const ucontext_t; + let rip = (*(*ucontext).uc_mcontext).ss.rip; + + (si_addr, rip as _) +} diff --git a/lib/runtime-core/src/types.rs b/lib/runtime-core/src/types.rs index e1967ba6826..3b641e043a2 100644 --- a/lib/runtime-core/src/types.rs +++ b/lib/runtime-core/src/types.rs @@ -251,8 +251,8 @@ impl MemoryDescriptor { pub fn memory_type(self) -> MemoryType { match (self.maximum.is_some(), self.shared) { (true, true) => MemoryType::SharedStatic, - (true, false) => MemoryType::Static, - (false, false) => MemoryType::Dynamic, + (true, false) | (false, false) => MemoryType::Static, + //(false, false) => MemoryType::Dynamic, (false, true) => panic!("shared memory without a max is not allowed"), } } From d8d39c309cf1f978c9469a419b3c4ae150c44ac2 Mon Sep 17 00:00:00 2001 From: losfair Date: Sun, 17 Mar 2019 10:54:50 +0800 Subject: [PATCH 070/100] Fix warnings. 
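Almost all of this patch renames unused closure parameters (`value_stack` to `_value_stack`, and so on) and drops dead imports; the leading underscore is how rustc's `unused_variables` lint is silenced without changing a closure's arity. A tiny self-contained illustration (not from the patch):

    fn demo() -> i32 {
        // Without the underscore, rustc warns: unused variable: `right`.
        let pick_left = |left: i32, _right: i32| left;
        pick_left(7, 99) // returns 7
    }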
--- lib/dynasm-backend/src/codegen.rs | 2 - lib/dynasm-backend/src/codegen_x64.rs | 317 +++++++++++-------------- lib/dynasm-backend/src/lib.rs | 15 +- lib/dynasm-backend/src/parse.rs | 5 +- lib/dynasm-backend/src/protect_unix.rs | 2 +- lib/dynasm-backend/src/stack.rs | 9 - 6 files changed, 143 insertions(+), 207 deletions(-) diff --git a/lib/dynasm-backend/src/codegen.rs b/lib/dynasm-backend/src/codegen.rs index e6b083d050e..59b7b470697 100644 --- a/lib/dynasm-backend/src/codegen.rs +++ b/lib/dynasm-backend/src/codegen.rs @@ -1,9 +1,7 @@ -use std::sync::Arc; use wasmer_runtime_core::{ backend::{ProtectedCaller, FuncResolver}, structures::Map, types::{FuncIndex, FuncSig, SigIndex}, - units::Pages, module::ModuleInfo, }; use wasmparser::{Operator, Type as WpType}; diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index ca3ebfcaa12..5d93066cf3e 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -8,16 +8,15 @@ use dynasmrt::{ }; use std::cell::RefCell; use std::ptr::NonNull; -use std::sync::Mutex; use std::{any::Any, collections::HashMap, sync::Arc}; use wasmer_runtime_core::{ - backend::{Backend, Compiler, FuncResolver, ProtectedCaller, Token, UserTrapper}, - error::{CompileError, CompileResult, RuntimeError, RuntimeResult}, - module::{ModuleInfo, ModuleInner, StringTable}, + backend::{FuncResolver, ProtectedCaller, Token, UserTrapper}, + error::{RuntimeError, RuntimeResult}, + module::{ModuleInfo, ModuleInner}, structures::{Map, TypedIndex}, types::{ - FuncIndex, FuncSig, GlobalIndex, LocalFuncIndex, LocalGlobalIndex, MemoryIndex, SigIndex, - TableIndex, Type, Value, LocalMemoryIndex, ImportedMemoryIndex, LocalOrImport, + FuncIndex, FuncSig, LocalFuncIndex, LocalGlobalIndex, MemoryIndex, SigIndex, + Type, Value, LocalMemoryIndex, ImportedMemoryIndex, LocalOrImport, }, memory::MemoryType, units::Pages, @@ -139,6 +138,7 @@ lazy_static! { #[repr(u8)] #[derive(Copy, Clone, Debug, Eq, PartialEq)] +#[allow(dead_code)] pub enum Register { RAX, RCX, @@ -180,7 +180,6 @@ impl Register { } pub fn is_used(&self, stack: &ValueStack) -> bool { - use self::Register::*; for val in &stack.values { match val.location { ValueLocation::Register(x) => { @@ -196,14 +195,8 @@ impl Register { } } -#[repr(u64)] -#[derive(Copy, Clone, Debug)] -pub enum TrapCode { - Unreachable, -} - +#[allow(dead_code)] pub struct NativeTrampolines { - trap_unreachable: DynamicLabel, memory_size_dynamic_local: DynamicLabel, memory_size_static_local: DynamicLabel, memory_size_shared_local: DynamicLabel, @@ -233,8 +226,6 @@ pub struct X64FunctionCode { function_signatures: Arc>, native_trampolines: Arc, - id: usize, - begin_label: DynamicLabel, begin_offset: AssemblyOffset, assembler: Option, function_labels: Option)>>, @@ -261,12 +252,12 @@ pub struct X64ExecutionContext { signatures: Arc>, function_signatures: Arc>, function_pointers: Vec, - br_table_data: Vec>, + _br_table_data: Vec>, func_import_count: usize, } pub struct X64RuntimeResolver { - code: ExecutableBuffer, + _code: ExecutableBuffer, local_pointers: Vec, } @@ -289,7 +280,7 @@ impl X64ExecutionContext { let local_pointers: Vec = offsets.iter().map(|x| FuncPtr(code.ptr(*x) as _)).collect(); Ok(X64RuntimeResolver { - code: code, + _code: code, local_pointers: local_pointers, }) } @@ -417,7 +408,7 @@ impl ProtectedCaller for X64ExecutionContext { pub struct Trapper; impl UserTrapper for Trapper { - unsafe fn do_early_trap(&self, data: Box) -> ! 
{ + unsafe fn do_early_trap(&self, _data: Box) -> ! { panic!("do_early_trap"); } } @@ -436,12 +427,6 @@ impl X64ModuleCodeGenerator { pub fn new() -> X64ModuleCodeGenerator { let mut assembler = Assembler::new().unwrap(); let nt = NativeTrampolines { - trap_unreachable: X64FunctionCode::emit_native_call_trampoline( - &mut assembler, - do_trap, - 0usize, - TrapCode::Unreachable, - ), memory_size_dynamic_local: X64FunctionCode::emit_native_call_trampoline( &mut assembler, _memory_size, @@ -573,8 +558,6 @@ impl ModuleCodeGenerator Result<(X64ExecutionContext, X64RuntimeResolver), CodegenError> { - let (mut assembler, mut br_table_data) = match self.functions.last_mut() { + let (assembler, mut br_table_data) = match self.functions.last_mut() { Some(x) => (x.assembler.take().unwrap(), x.br_table_data.take().unwrap()), None => { return Err(CodegenError { @@ -638,7 +621,7 @@ impl ModuleCodeGenerator x, @@ -711,7 +694,6 @@ impl X64FunctionCode { match info.location { ValueLocation::Register(_) => {} ValueLocation::Stack => { - let size = get_size_of_type(&info.ty)?; dynasm!( assembler ; add rsp, 8 @@ -867,7 +849,7 @@ impl X64FunctionCode { f: F, ) { let rcx_used = Register::RCX.is_used(value_stack); - if(rcx_used) { + if rcx_used { dynasm!( assembler ; push rcx @@ -878,7 +860,7 @@ impl X64FunctionCode { ; mov rcx, Rq(right as u8) ); f(assembler, left); - if(rcx_used) { + if rcx_used { dynasm!( assembler ; pop rcx @@ -1147,7 +1129,7 @@ impl X64FunctionCode { }); } - if let Some(ty) = ret_ty { + if let Some(_) = ret_ty { if value_stack.values.iter().last().map(|x| x.ty) != ret_ty { return Err(CodegenError { message: "value type != return type", @@ -1390,7 +1372,7 @@ impl X64FunctionCode { dynasm!( assembler ; mov r8, rdi // vmctx - ; mov rdx, QWORD (target.0 as usize as i64) + ; mov rdx, QWORD target.0 as usize as i64 ; mov rsi, QWORD (num_params * 8) as i64 ; mov rdi, rsp ); @@ -1436,7 +1418,7 @@ impl X64FunctionCode { dynasm!( assembler - ; mov rax, QWORD (*CALL_WASM as usize as i64) + ; mov rax, QWORD *CALL_WASM as usize as i64 ; call rax ; mov rsp, rbp ; pop rbp @@ -1564,15 +1546,15 @@ impl X64FunctionCode { dynasm!( assembler - ; mov rdi, QWORD (unsafe { ::std::mem::transmute_copy::(&ctx1) }) - ; mov rsi, QWORD (unsafe { ::std::mem::transmute_copy::(&ctx2) }) + ; mov rdi, QWORD unsafe { ::std::mem::transmute_copy::(&ctx1) } + ; mov rsi, QWORD unsafe { ::std::mem::transmute_copy::(&ctx2) } ; mov rdx, rsp ; mov rcx, rbp ; mov r8, r14 // vmctx ; mov r9, r15 // memory_base - ; mov rax, QWORD (0xfffffffffffffff0u64 as i64) + ; mov rax, QWORD 0xfffffffffffffff0u64 as i64 ; and rsp, rax - ; mov rax, QWORD (target as i64) + ; mov rax, QWORD target as i64 ; call rax ; mov rsp, rbp ; pop rbp @@ -1823,8 +1805,6 @@ impl FunctionCodeGenerator for X64FunctionCode { /// - Params in reversed order, caller initialized /// - Locals in reversed order, callee initialized fn feed_param(&mut self, ty: WpType) -> Result<(), CodegenError> { - let assembler = self.assembler.as_mut().unwrap(); - self.current_stack_offset += 8; self.locals.push(Local { ty: ty, @@ -2111,7 +2091,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop_i32( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { dynasm!( assembler ; add Rd(left as u8), Rd(right as u8) @@ -2123,7 +2103,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop_i32( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, 
left, right| { dynasm!( assembler ; sub Rd(left as u8), Rd(right as u8) @@ -2135,7 +2115,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop_i32( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { dynasm!( assembler ; imul Rd(left as u8), Rd(right as u8) @@ -2211,7 +2191,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop_i32( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { dynasm!( assembler ; and Rd(left as u8), Rd(right as u8) @@ -2223,7 +2203,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop_i32( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { dynasm!( assembler ; or Rd(left as u8), Rd(right as u8) @@ -2235,7 +2215,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop_i32( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { dynasm!( assembler ; xor Rd(left as u8), Rd(right as u8) @@ -2247,7 +2227,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop_i32( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { dynasm!( assembler ; cmp Rd(left as u8), Rd(right as u8) @@ -2263,7 +2243,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop_i32( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { dynasm!( assembler ; cmp Rd(left as u8), Rd(right as u8) @@ -2280,7 +2260,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_unop_i32( assembler, &mut self.value_stack, - |assembler, value_stack, reg| { + |assembler, _value_stack, reg| { dynasm!( assembler ; cmp Rd(reg as u8), 0 @@ -2301,7 +2281,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_unop_i32( assembler, &mut self.value_stack, - |assembler, value_stack, reg| { + |assembler, _value_stack, reg| { dynasm!( assembler ; lzcnt Rd(reg as u8), Rd(reg as u8) @@ -2313,7 +2293,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_unop_i32( assembler, &mut self.value_stack, - |assembler, value_stack, reg| { + |assembler, _value_stack, reg| { dynasm!( assembler ; tzcnt Rd(reg as u8), Rd(reg as u8) @@ -2325,7 +2305,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_unop_i32( assembler, &mut self.value_stack, - |assembler, value_stack, reg| { + |assembler, _value_stack, reg| { dynasm!( assembler ; popcnt Rd(reg as u8), Rd(reg as u8) @@ -2410,7 +2390,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop_i32( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { Self::emit_cmp_i32(assembler, left, right, |assembler| { dynasm!( assembler @@ -2424,7 +2404,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop_i32( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { Self::emit_cmp_i32(assembler, left, right, |assembler| { dynasm!( assembler @@ -2438,7 +2418,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop_i32( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { Self::emit_cmp_i32(assembler, left, right, |assembler| { dynasm!( assembler @@ -2452,7 
+2432,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop_i32( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { Self::emit_cmp_i32(assembler, left, right, |assembler| { dynasm!( assembler @@ -2466,7 +2446,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop_i32( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { Self::emit_cmp_i32(assembler, left, right, |assembler| { dynasm!( assembler @@ -2480,7 +2460,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop_i32( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { Self::emit_cmp_i32(assembler, left, right, |assembler| { dynasm!( assembler @@ -2494,7 +2474,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop_i32( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { Self::emit_cmp_i32(assembler, left, right, |assembler| { dynasm!( assembler @@ -2508,7 +2488,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop_i32( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { Self::emit_cmp_i32(assembler, left, right, |assembler| { dynasm!( assembler @@ -2541,7 +2521,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop_i64( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { dynasm!( assembler ; add Rq(left as u8), Rq(right as u8) @@ -2553,7 +2533,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop_i64( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { dynasm!( assembler ; sub Rq(left as u8), Rq(right as u8) @@ -2565,7 +2545,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop_i64( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { dynasm!( assembler ; imul Rq(left as u8), Rq(right as u8) @@ -2641,7 +2621,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop_i64( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { dynasm!( assembler ; and Rq(left as u8), Rq(right as u8) @@ -2653,7 +2633,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop_i64( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { dynasm!( assembler ; or Rq(left as u8), Rq(right as u8) @@ -2665,7 +2645,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop_i64( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { dynasm!( assembler ; xor Rq(left as u8), Rq(right as u8) @@ -2677,7 +2657,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { dynasm!( assembler ; cmp Rq(left as u8), Rq(right as u8) @@ -2695,7 +2675,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { dynasm!( assembler ; cmp Rq(left as 
u8), Rq(right as u8) @@ -2714,7 +2694,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_unop( assembler, &mut self.value_stack, - |assembler, value_stack, reg| { + |assembler, _value_stack, reg| { dynasm!( assembler ; cmp Rq(reg as u8), 0 @@ -2737,7 +2717,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_unop_i64( assembler, &mut self.value_stack, - |assembler, value_stack, reg| { + |assembler, _value_stack, reg| { dynasm!( assembler ; lzcnt Rq(reg as u8), Rq(reg as u8) @@ -2749,7 +2729,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_unop_i64( assembler, &mut self.value_stack, - |assembler, value_stack, reg| { + |assembler, _value_stack, reg| { dynasm!( assembler ; tzcnt Rq(reg as u8), Rq(reg as u8) @@ -2761,7 +2741,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_unop_i64( assembler, &mut self.value_stack, - |assembler, value_stack, reg| { + |assembler, _value_stack, reg| { dynasm!( assembler ; popcnt Rq(reg as u8), Rq(reg as u8) @@ -2846,7 +2826,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { Self::emit_cmp_i64(assembler, left, right, |assembler| { dynasm!( assembler @@ -2862,7 +2842,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { Self::emit_cmp_i64(assembler, left, right, |assembler| { dynasm!( assembler @@ -2878,7 +2858,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { Self::emit_cmp_i64(assembler, left, right, |assembler| { dynasm!( assembler @@ -2894,7 +2874,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { Self::emit_cmp_i64(assembler, left, right, |assembler| { dynasm!( assembler @@ -2910,7 +2890,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { Self::emit_cmp_i64(assembler, left, right, |assembler| { dynasm!( assembler @@ -2926,7 +2906,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { Self::emit_cmp_i64(assembler, left, right, |assembler| { dynasm!( assembler @@ -2942,7 +2922,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { Self::emit_cmp_i64(assembler, left, right, |assembler| { dynasm!( assembler @@ -2958,7 +2938,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { Self::emit_cmp_i64(assembler, left, right, |assembler| { dynasm!( assembler @@ -2974,7 +2954,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_unop( assembler, &mut self.value_stack, - |assembler, value_stack, reg| { + |assembler, _value_stack, reg| { dynasm!( assembler ; movsx Rq(reg as u8), Rd(reg as u8) @@ -2988,7 +2968,7 @@ impl 
FunctionCodeGenerator for X64FunctionCode { Self::emit_unop( assembler, &mut self.value_stack, - |assembler, value_stack, reg| { + |_assembler, _value_stack, _reg| { // FIXME: Is it correct to do nothing here? }, WpType::I32, @@ -2999,7 +2979,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_unop( assembler, &mut self.value_stack, - |assembler, value_stack, reg| { + |assembler, _value_stack, reg| { dynasm!( assembler ; mov Rd(reg as u8), Rd(reg as u8) // clear upper 32 bits @@ -3026,15 +3006,6 @@ impl FunctionCodeGenerator for X64FunctionCode { }); } Operator::Unreachable => { - /* - Self::emit_call_raw( - assembler, - &mut self.value_stack, - self.native_trampolines.trap_unreachable, - &[], - &[], - )?; - */ dynasm!( assembler ; ud2 @@ -3710,7 +3681,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_unop( assembler, &mut self.value_stack, - |assembler, value_stack, reg| { + |assembler, _value_stack, reg| { dynasm!( assembler ; cvtsi2ss xmm1, Rd(reg as u8) @@ -3725,7 +3696,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_unop( assembler, &mut self.value_stack, - |assembler, value_stack, reg| { + |assembler, _value_stack, reg| { dynasm!( assembler ; mov Rd(reg as u8), Rd(reg as u8) // clear upper 32 bits @@ -3741,7 +3712,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_unop( assembler, &mut self.value_stack, - |assembler, value_stack, reg| { + |assembler, _value_stack, reg| { dynasm!( assembler ; cvtsi2ss xmm1, Rq(reg as u8) @@ -3770,7 +3741,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_unop( assembler, &mut self.value_stack, - |assembler, value_stack, reg| { + |assembler, _value_stack, reg| { dynasm!( assembler ; test Rq(reg as u8), Rq(reg as u8) @@ -3801,7 +3772,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_unop( assembler, &mut self.value_stack, - |assembler, value_stack, reg| { + |assembler, _value_stack, reg| { dynasm!( assembler ; cvtsi2sd xmm1, Rd(reg as u8) @@ -3816,7 +3787,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_unop( assembler, &mut self.value_stack, - |assembler, value_stack, reg| { + |assembler, _value_stack, reg| { dynasm!( assembler ; mov Rd(reg as u8), Rd(reg as u8) // clear upper 32 bits @@ -3832,7 +3803,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_unop( assembler, &mut self.value_stack, - |assembler, value_stack, reg| { + |assembler, _value_stack, reg| { dynasm!( assembler ; cvtsi2sd xmm1, Rq(reg as u8) @@ -3847,7 +3818,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_unop( assembler, &mut self.value_stack, - |assembler, value_stack, reg| { + |assembler, _value_stack, reg| { dynasm!( assembler ; test Rq(reg as u8), Rq(reg as u8) @@ -3878,7 +3849,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_unop( assembler, &mut self.value_stack, - |assembler, value_stack, reg| { + |assembler, _value_stack, reg| { dynasm!( assembler ; movd xmm1, Rd(reg as u8) @@ -3894,7 +3865,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_unop( assembler, &mut self.value_stack, - |assembler, value_stack, reg| { + |assembler, _value_stack, reg| { dynasm!( assembler ; movq xmm1, Rq(reg as u8) @@ -3910,7 +3881,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { dynasm!( assembler ; movd xmm1, Rd(left as u8) @@ -3927,7 +3898,7 @@ impl FunctionCodeGenerator for X64FunctionCode { 
Self::emit_binop( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { dynasm!( assembler ; movd xmm1, Rd(left as u8) @@ -3944,7 +3915,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { dynasm!( assembler ; movd xmm1, Rd(left as u8) @@ -3961,7 +3932,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { dynasm!( assembler ; movd xmm1, Rd(left as u8) @@ -3978,7 +3949,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { dynasm!( assembler ; movd xmm1, Rd(left as u8) @@ -3995,7 +3966,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { dynasm!( assembler ; movd xmm1, Rd(left as u8) @@ -4012,7 +3983,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { dynasm!( assembler ; movd xmm1, Rd(left as u8) @@ -4030,7 +4001,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { dynasm!( assembler ; movd xmm1, Rd(left as u8) @@ -4048,7 +4019,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { dynasm!( assembler ; movd xmm1, Rd(left as u8) @@ -4066,7 +4037,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { dynasm!( assembler ; movd xmm1, Rd(left as u8) @@ -4084,7 +4055,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { dynasm!( assembler ; movd xmm1, Rd(left as u8) @@ -4102,7 +4073,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { dynasm!( assembler ; movd xmm1, Rd(left as u8) @@ -4120,7 +4091,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { dynasm!( assembler ; movd xmm1, Rd(left as u8) @@ -4143,7 +4114,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_unop( assembler, &mut self.value_stack, - |assembler, value_stack, reg| { + |assembler, _value_stack, reg| { dynasm!( assembler ; movd xmm1, Rd(reg as u8) @@ -4159,7 +4130,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_unop( assembler, &mut self.value_stack, - |assembler, value_stack, reg| { + |assembler, _value_stack, reg| { dynasm!( assembler ; and Rd(reg as u8), 0x7fffffffu32 as i32 @@ -4173,7 +4144,7 @@ impl FunctionCodeGenerator for X64FunctionCode { 
Self::emit_unop( assembler, &mut self.value_stack, - |assembler, value_stack, reg| { + |assembler, _value_stack, reg| { dynasm!( assembler ; btc Rd(reg as u8), 31 @@ -4187,7 +4158,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_unop( assembler, &mut self.value_stack, - |assembler, value_stack, reg| { + |assembler, _value_stack, reg| { dynasm!( assembler ; movd xmm1, Rd(reg as u8) @@ -4203,7 +4174,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_unop( assembler, &mut self.value_stack, - |assembler, value_stack, reg| { + |assembler, _value_stack, reg| { dynasm!( assembler ; movd xmm1, Rd(reg as u8) @@ -4219,7 +4190,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_unop( assembler, &mut self.value_stack, - |assembler, value_stack, reg| { + |assembler, _value_stack, reg| { dynasm!( assembler ; movd xmm1, Rd(reg as u8) @@ -4235,7 +4206,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_unop( assembler, &mut self.value_stack, - |assembler, value_stack, reg| { + |assembler, _value_stack, reg| { dynasm!( assembler ; movd xmm1, Rd(reg as u8) @@ -4251,7 +4222,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_unop( assembler, &mut self.value_stack, - |assembler, value_stack, reg| { + |assembler, _value_stack, reg| { Self::emit_f32_int_conv_check( assembler, reg, @@ -4273,7 +4244,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_unop( assembler, &mut self.value_stack, - |assembler, value_stack, reg| { + |assembler, _value_stack, reg| { Self::emit_f32_int_conv_check( assembler, reg, @@ -4295,7 +4266,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_unop( assembler, &mut self.value_stack, - |assembler, value_stack, reg| { + |assembler, _value_stack, reg| { Self::emit_f32_int_conv_check( assembler, reg, @@ -4317,7 +4288,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_unop( assembler, &mut self.value_stack, - |assembler, value_stack, reg| { + |assembler, _value_stack, reg| { Self::emit_f32_int_conv_check( assembler, reg, @@ -4339,7 +4310,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { dynasm!( assembler ; movq xmm1, Rq(left as u8) @@ -4356,7 +4327,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { dynasm!( assembler ; movq xmm1, Rq(left as u8) @@ -4373,7 +4344,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { dynasm!( assembler ; movq xmm1, Rq(left as u8) @@ -4390,7 +4361,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { dynasm!( assembler ; movq xmm1, Rq(left as u8) @@ -4407,7 +4378,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { dynasm!( assembler ; movq xmm1, Rq(left as u8) @@ -4424,7 +4395,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { dynasm!( 
assembler ; movq xmm1, Rq(left as u8) @@ -4441,7 +4412,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { dynasm!( assembler ; movq xmm1, Rq(left as u8) @@ -4459,7 +4430,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { dynasm!( assembler ; movq xmm1, Rq(left as u8) @@ -4477,7 +4448,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { dynasm!( assembler ; movq xmm1, Rq(left as u8) @@ -4495,7 +4466,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { dynasm!( assembler ; movq xmm1, Rq(left as u8) @@ -4513,7 +4484,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { dynasm!( assembler ; movq xmm1, Rq(left as u8) @@ -4531,7 +4502,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { dynasm!( assembler ; movq xmm1, Rq(left as u8) @@ -4549,7 +4520,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_binop( assembler, &mut self.value_stack, - |assembler, value_stack, left, right| { + |assembler, _value_stack, left, right| { dynasm!( assembler ; movq xmm1, Rq(left as u8) @@ -4572,7 +4543,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_unop( assembler, &mut self.value_stack, - |assembler, value_stack, reg| { + |assembler, _value_stack, reg| { dynasm!( assembler ; movq xmm1, Rq(reg as u8) @@ -4588,7 +4559,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_unop( assembler, &mut self.value_stack, - |assembler, value_stack, reg| { + |assembler, _value_stack, reg| { dynasm!( assembler ; movq xmm1, Rq(reg as u8) @@ -4606,7 +4577,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_unop( assembler, &mut self.value_stack, - |assembler, value_stack, reg| { + |assembler, _value_stack, reg| { dynasm!( assembler ; btc Rq(reg as u8), 63 @@ -4620,7 +4591,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_unop( assembler, &mut self.value_stack, - |assembler, value_stack, reg| { + |assembler, _value_stack, reg| { dynasm!( assembler ; movq xmm1, Rq(reg as u8) @@ -4636,7 +4607,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_unop( assembler, &mut self.value_stack, - |assembler, value_stack, reg| { + |assembler, _value_stack, reg| { dynasm!( assembler ; movq xmm1, Rq(reg as u8) @@ -4652,7 +4623,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_unop( assembler, &mut self.value_stack, - |assembler, value_stack, reg| { + |assembler, _value_stack, reg| { dynasm!( assembler ; movq xmm1, Rq(reg as u8) @@ -4668,7 +4639,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_unop( assembler, &mut self.value_stack, - |assembler, value_stack, reg| { + |assembler, _value_stack, reg| { dynasm!( assembler ; movq xmm1, Rq(reg as u8) @@ -4684,7 +4655,7 @@ impl FunctionCodeGenerator for X64FunctionCode { 
Self::emit_unop( assembler, &mut self.value_stack, - |assembler, value_stack, reg| { + |assembler, _value_stack, reg| { Self::emit_f64_int_conv_check( assembler, reg, @@ -4707,7 +4678,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_unop( assembler, &mut self.value_stack, - |assembler, value_stack, reg| { + |assembler, _value_stack, reg| { Self::emit_f64_int_conv_check( assembler, reg, @@ -4730,7 +4701,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_unop( assembler, &mut self.value_stack, - |assembler, value_stack, reg| { + |assembler, _value_stack, reg| { Self::emit_f64_int_conv_check( assembler, reg, @@ -4753,7 +4724,7 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_unop( assembler, &mut self.value_stack, - |assembler, value_stack, reg| { + |assembler, _value_stack, reg| { Self::emit_f64_int_conv_check( assembler, reg, @@ -4876,15 +4847,6 @@ fn is_dword(n: usize) -> bool { n == 4 } -fn value_to_i64(v: &Value) -> i64 { - match *v { - Value::F32(x) => x.to_bits() as u64 as i64, - Value::F64(x) => x.to_bits() as u64 as i64, - Value::I32(x) => x as u64 as i64, - Value::I64(x) => x as u64 as i64, - } -} - fn type_to_wp_type(ty: Type) -> WpType { match ty { Type::I32 => WpType::I32, @@ -4894,31 +4856,20 @@ fn type_to_wp_type(ty: Type) -> WpType { } } -unsafe extern "C" fn do_trap( - ctx1: usize, - ctx2: TrapCode, - stack_top: *mut u8, - stack_base: *mut u8, - vmctx: *mut vm::Ctx, - memory_base: *mut u8, -) -> u64 { - panic!("TRAP CODE: {:?}", ctx2); -} - unsafe extern "C" fn invoke_import( _unused: usize, import_id: usize, stack_top: *mut u8, stack_base: *mut u8, vmctx: *mut vm::Ctx, - memory_base: *mut u8, + _memory_base: *mut u8, ) -> u64 { let vmctx: &mut vm::Ctx = &mut *vmctx; let import = (*vmctx.imported_funcs.offset(import_id as isize)).func; - let n_args = (stack_base as usize - stack_top as usize) / 8; + /*let n_args = (stack_base as usize - stack_top as usize) / 8; - /*println!("Calling import: {:?} with vmctx = {:?}, n_args = {}", + println!("Calling import: {:?} with vmctx = {:?}, n_args = {}", import, vmctx as *mut _, n_args, @@ -4956,7 +4907,7 @@ unsafe extern "C" fn call_indirect( } ; if elem_index >= table.count as usize { eprintln!("element index out of bounds"); - unsafe { protect_unix::trigger_trap(); } + protect_unix::trigger_trap(); } let anyfunc = &*(table.base as *mut vm::Anyfunc).offset(elem_index as isize); let ctx: &X64ExecutionContext = @@ -4966,7 +4917,7 @@ unsafe extern "C" fn call_indirect( Some(x) => x, None => { eprintln!("empty table entry"); - unsafe { protect_unix::trigger_trap(); } + protect_unix::trigger_trap(); } }; @@ -4979,7 +4930,7 @@ unsafe extern "C" fn call_indirect( != ctx.signatures[ctx.function_signatures[func_index]] { eprintln!("signature mismatch"); - unsafe { protect_unix::trigger_trap(); } + protect_unix::trigger_trap(); } let func = ctx.function_pointers[func_index.index() as usize].0; @@ -5006,10 +4957,10 @@ enum MemoryKind { unsafe extern "C" fn _memory_size( op: MemoryKind, index: usize, - mut stack_top: *mut u8, - stack_base: *mut u8, + _stack_top: *mut u8, + _stack_base: *mut u8, vmctx: *mut vm::Ctx, - memory_base: *mut u8, + _memory_base: *mut u8, ) -> u64 { use wasmer_runtime_core::vmcalls; let ret = match op { @@ -5026,10 +4977,10 @@ unsafe extern "C" fn _memory_size( unsafe extern "C" fn _memory_grow( op: MemoryKind, index: usize, - mut stack_top: *mut u8, + stack_top: *mut u8, stack_base: *mut u8, vmctx: *mut vm::Ctx, - memory_base: *mut u8, + _memory_base: *mut u8, ) -> u64 { use 
wasmer_runtime_core::vmcalls; assert_eq!(stack_base as usize - stack_top as usize, 8); diff --git a/lib/dynasm-backend/src/lib.rs b/lib/dynasm-backend/src/lib.rs index 080cafca769..377f1a6738d 100644 --- a/lib/dynasm-backend/src/lib.rs +++ b/lib/dynasm-backend/src/lib.rs @@ -6,7 +6,6 @@ )))] compile_error!("This crate doesn't yet support compiling on operating systems other than linux and macos and architectures other than x86_64"); -#[macro_use] extern crate dynasmrt; #[macro_use] @@ -27,23 +26,21 @@ use crate::codegen::{CodegenError, ModuleCodeGenerator}; use crate::parse::LoadError; use std::ptr::NonNull; use wasmer_runtime_core::{ - backend::{sys::Memory, Backend, CacheGen, Compiler, FuncResolver, ProtectedCaller, Token, UserTrapper}, + backend::{sys::Memory, Backend, CacheGen, Compiler, FuncResolver, Token}, cache::{Artifact, Error as CacheError}, - error::{CompileError, CompileResult, RuntimeResult}, - module::{ModuleInfo, ModuleInner, StringTable}, - structures::{Map, TypedIndex}, + error::{CompileError, CompileResult}, + module::{ModuleInfo, ModuleInner}, types::{ - FuncIndex, FuncSig, GlobalIndex, LocalFuncIndex, MemoryIndex, SigIndex, TableIndex, Type, - Value, + LocalFuncIndex, }, - vm::{self, ImportBacking}, + vm, }; struct Placeholder; impl CacheGen for Placeholder { fn generate_cache( &self, - module: &ModuleInner, + _module: &ModuleInner, ) -> Result<(Box, Box<[u8]>, Memory), CacheError> { // unimplemented!() Err(CacheError::Unknown("the dynasm backend doesn't support caching yet".to_string())) diff --git a/lib/dynasm-backend/src/parse.rs b/lib/dynasm-backend/src/parse.rs index c9da4a0cd23..5c15fff65a2 100644 --- a/lib/dynasm-backend/src/parse.rs +++ b/lib/dynasm-backend/src/parse.rs @@ -1,5 +1,4 @@ use crate::codegen::{CodegenError, FunctionCodeGenerator, ModuleCodeGenerator}; -use std::sync::Arc; use wasmer_runtime_core::{ backend::{Backend, ProtectedCaller, FuncResolver}, module::{ @@ -15,7 +14,7 @@ use wasmer_runtime_core::{ units::Pages, }; use wasmparser::{ - BinaryReaderError, CodeSectionReader, Data, DataKind, Element, ElementKind, Export, + BinaryReaderError, Data, DataKind, Element, ElementKind, Export, ExternalKind, FuncType, Import, ImportSectionEntryType, InitExpr, ModuleReader, Operator, SectionCode, Type as WpType, WasmDecoder, @@ -308,7 +307,7 @@ pub fn read_module< mcg.check_precondition(&info)?; for i in 0..code_reader.get_count() { let item = code_reader.read()?; - let mut fcg = mcg.next_function()?; + let fcg = mcg.next_function()?; let sig = info .signatures .get( diff --git a/lib/dynasm-backend/src/protect_unix.rs b/lib/dynasm-backend/src/protect_unix.rs index 38fa5e954e2..4daf633aa71 100644 --- a/lib/dynasm-backend/src/protect_unix.rs +++ b/lib/dynasm-backend/src/protect_unix.rs @@ -73,7 +73,7 @@ pub fn call_protected(f: impl FnOnce() -> T) -> RuntimeResult { if signum != 0 { *jmp_buf = prev_jmp_buf; - let (faulting_addr, inst_ptr) = CAUGHT_ADDRESSES.with(|cell| cell.get()); + let (faulting_addr, _inst_ptr) = CAUGHT_ADDRESSES.with(|cell| cell.get()); let signal = match Signal::from_c_int(signum) { Ok(SIGFPE) => "floating-point exception", diff --git a/lib/dynasm-backend/src/stack.rs b/lib/dynasm-backend/src/stack.rs index d1ffc29f859..d237b05b1a4 100644 --- a/lib/dynasm-backend/src/stack.rs +++ b/lib/dynasm-backend/src/stack.rs @@ -144,15 +144,6 @@ impl ValueStack { } } - pub fn peek(&self) -> Result { - match self.values.last().cloned() { - Some(x) => Ok(x), - None => Err(CodegenError { - message: "no value on top of stack", - }), - } - } - pub fn 
reset_depth(&mut self, target_depth: usize) { self.values.truncate(target_depth); } From 7394df2fd8fe3f0fc29e971d61adbdd40ac2e92a Mon Sep 17 00:00:00 2001 From: losfair Date: Sun, 17 Mar 2019 19:54:20 +0800 Subject: [PATCH 071/100] FIx floating point trunc's. --- lib/dynasm-backend/src/codegen_x64.rs | 85 +++++++++++++++++++-------- 1 file changed, 61 insertions(+), 24 deletions(-) diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index 5d93066cf3e..02d38f87333 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -3746,19 +3746,17 @@ impl FunctionCodeGenerator for X64FunctionCode { assembler ; test Rq(reg as u8), Rq(reg as u8) ; js >do_convert - // fast path: positive as signed ; cvtsi2ss xmm1, Rq(reg as u8) ; movd Rd(reg as u8), xmm1 ; jmp >end_convert ; do_convert: - // use r15 as temporary register ; movq xmm5, r15 ; mov r15, Rq(reg as u8) ; and r15, 1 ; shr Rq(reg as u8), 1 ; or Rq(reg as u8), r15 ; cvtsi2ss xmm1, Rq(reg as u8) - ; addsd xmm1, xmm1 + ; addss xmm1, xmm1 ; movq r15, xmm5 ; movd Rd(reg as u8), xmm1 ; end_convert: @@ -3823,12 +3821,10 @@ impl FunctionCodeGenerator for X64FunctionCode { assembler ; test Rq(reg as u8), Rq(reg as u8) ; js >do_convert - // fast path: positive as signed ; cvtsi2sd xmm1, Rq(reg as u8) ; movq Rq(reg as u8), xmm1 ; jmp >end_convert ; do_convert: - // use r15 as temporary register ; movq xmm5, r15 ; mov r15, Rq(reg as u8) ; and r15, 1 @@ -4232,8 +4228,8 @@ impl FunctionCodeGenerator for X64FunctionCode { dynasm!( assembler ; movd xmm1, Rd(reg as u8) - ; roundss xmm1, xmm1, 3 - ; cvtss2si Rd(reg as u8), xmm1 + ; cvttss2si Rq(reg as u8), xmm1 + ; mov Rd(reg as u8), Rd(reg as u8) ); }, WpType::F32, @@ -4254,8 +4250,7 @@ impl FunctionCodeGenerator for X64FunctionCode { dynasm!( assembler ; movd xmm1, Rd(reg as u8) - ; roundss xmm1, xmm1, 3 - ; cvtss2si Rd(reg as u8), xmm1 + ; cvttss2si Rd(reg as u8), xmm1 ); }, WpType::F32, @@ -4273,11 +4268,33 @@ impl FunctionCodeGenerator for X64FunctionCode { -1.0, 18446744073709551616.0, ); + /* + LCPI0_0: + .long 1593835520 ## float 9.22337203E+18 + + movss LCPI0_0(%rip), %xmm1 ## xmm1 = mem[0],zero,zero,zero + movaps %xmm0, %xmm2 + subss %xmm1, %xmm2 + cvttss2si %xmm2, %rax + movabsq $-9223372036854775808, %rcx ## imm = 0x8000000000000000 + xorq %rax, %rcx + cvttss2si %xmm0, %rax + ucomiss %xmm1, %xmm0 + cmovaeq %rcx, %rax + */ dynasm!( assembler - ; movd xmm1, Rd(reg as u8) - ; roundss xmm1, xmm1, 3 - ; cvtss2si Rq(reg as u8), xmm1 + ; mov r13d, 1593835520u32 as i32 //float 9.22337203E+18 + ; movd xmm1, r13d + ; movd xmm2, Rd(reg as u8) + ; movd xmm3, Rd(reg as u8) + ; subss xmm2, xmm1 + ; cvttss2si Rq(reg as u8), xmm2 + ; mov r13, QWORD 0x8000000000000000u64 as i64 + ; xor r13, Rq(reg as u8) + ; cvttss2si Rq(reg as u8), xmm3 + ; ucomiss xmm3, xmm1 + ; cmovae Rq(reg as u8), r13 ); }, WpType::F32, @@ -4298,8 +4315,7 @@ impl FunctionCodeGenerator for X64FunctionCode { dynasm!( assembler ; movd xmm1, Rd(reg as u8) - ; roundss xmm1, xmm1, 3 - ; cvtss2si Rq(reg as u8), xmm1 + ; cvttss2si Rq(reg as u8), xmm1 ); }, WpType::F32, @@ -4565,7 +4581,7 @@ impl FunctionCodeGenerator for X64FunctionCode { ; movq xmm1, Rq(reg as u8) ; mov rax, QWORD 0x7fffffffffffffff ; movq xmm2, rax - ; por xmm1, xmm2 + ; pand xmm1, xmm2 ; movq Rq(reg as u8), xmm1 ); }, @@ -4666,8 +4682,8 @@ impl FunctionCodeGenerator for X64FunctionCode { dynasm!( assembler ; movq xmm1, Rq(reg as u8) - ; roundsd xmm1, xmm1, 3 - ; cvtsd2si Rd(reg as u8), xmm1 + ; cvttsd2si 
Rq(reg as u8), xmm1 + ; mov Rd(reg as u8), Rd(reg as u8) ); }, WpType::F64, @@ -4689,8 +4705,7 @@ impl FunctionCodeGenerator for X64FunctionCode { dynasm!( assembler ; movq xmm1, Rq(reg as u8) - ; roundsd xmm1, xmm1, 3 - ; cvtsd2si Rd(reg as u8), xmm1 + ; cvttsd2si Rd(reg as u8), xmm1 ); }, WpType::F64, @@ -4709,11 +4724,34 @@ impl FunctionCodeGenerator for X64FunctionCode { 18446744073709551616.0, ); + /* + LCPI0_0: + .quad 4890909195324358656 ## double 9.2233720368547758E+18 + + movsd LCPI0_0(%rip), %xmm1 ## xmm1 = mem[0],zero + movapd %xmm0, %xmm2 + subsd %xmm1, %xmm2 + cvttsd2si %xmm2, %rax + movabsq $-9223372036854775808, %rcx ## imm = 0x8000000000000000 + xorq %rax, %rcx + cvttsd2si %xmm0, %rax + ucomisd %xmm1, %xmm0 + cmovaeq %rcx, %rax + */ + dynasm!( assembler - ; movq xmm1, Rq(reg as u8) - ; roundsd xmm1, xmm1, 3 - ; cvtsd2si Rq(reg as u8), xmm1 + ; mov r13, QWORD 4890909195324358656u64 as i64 //double 9.2233720368547758E+18 + ; movq xmm1, r13 + ; movq xmm2, Rq(reg as u8) + ; movq xmm3, Rq(reg as u8) + ; subsd xmm2, xmm1 + ; cvttsd2si Rq(reg as u8), xmm2 + ; mov r13, QWORD 0x8000000000000000u64 as i64 + ; xor r13, Rq(reg as u8) + ; cvttsd2si Rq(reg as u8), xmm3 + ; ucomisd xmm3, xmm1 + ; cmovae Rq(reg as u8), r13 ); }, WpType::F64, @@ -4735,8 +4773,7 @@ impl FunctionCodeGenerator for X64FunctionCode { dynasm!( assembler ; movq xmm1, Rq(reg as u8) - ; roundsd xmm1, xmm1, 3 - ; cvtsd2si Rq(reg as u8), xmm1 + ; cvttsd2si Rq(reg as u8), xmm1 ); }, WpType::F64, From fcfde7352a2cc31fe48803fe0e5f24297db29068 Mon Sep 17 00:00:00 2001 From: losfair Date: Sun, 17 Mar 2019 21:27:19 +0800 Subject: [PATCH 072/100] Use R15 as temporary register. --- lib/dynasm-backend/src/codegen_x64.rs | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index 02d38f87333..be5249c8234 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -4284,17 +4284,19 @@ impl FunctionCodeGenerator for X64FunctionCode { */ dynasm!( assembler - ; mov r13d, 1593835520u32 as i32 //float 9.22337203E+18 - ; movd xmm1, r13d + ; movq xmm5, r15 + ; mov r15d, 1593835520u32 as i32 //float 9.22337203E+18 + ; movd xmm1, r15d ; movd xmm2, Rd(reg as u8) ; movd xmm3, Rd(reg as u8) ; subss xmm2, xmm1 ; cvttss2si Rq(reg as u8), xmm2 - ; mov r13, QWORD 0x8000000000000000u64 as i64 - ; xor r13, Rq(reg as u8) + ; mov r15, QWORD 0x8000000000000000u64 as i64 + ; xor r15, Rq(reg as u8) ; cvttss2si Rq(reg as u8), xmm3 ; ucomiss xmm3, xmm1 - ; cmovae Rq(reg as u8), r13 + ; cmovae Rq(reg as u8), r15 + ; movq r15, xmm5 ); }, WpType::F32, @@ -4741,17 +4743,19 @@ impl FunctionCodeGenerator for X64FunctionCode { dynasm!( assembler - ; mov r13, QWORD 4890909195324358656u64 as i64 //double 9.2233720368547758E+18 - ; movq xmm1, r13 + ; movq xmm5, r15 + ; mov r15, QWORD 4890909195324358656u64 as i64 //double 9.2233720368547758E+18 + ; movq xmm1, r15 ; movq xmm2, Rq(reg as u8) ; movq xmm3, Rq(reg as u8) ; subsd xmm2, xmm1 ; cvttsd2si Rq(reg as u8), xmm2 - ; mov r13, QWORD 0x8000000000000000u64 as i64 - ; xor r13, Rq(reg as u8) + ; mov r15, QWORD 0x8000000000000000u64 as i64 + ; xor r15, Rq(reg as u8) ; cvttsd2si Rq(reg as u8), xmm3 ; ucomisd xmm3, xmm1 - ; cmovae Rq(reg as u8), r13 + ; cmovae Rq(reg as u8), r15 + ; movq r15, xmm5 ); }, WpType::F64, From 337b2ebf1bd40ce8db66506c8e1c024a70081d07 Mon Sep 17 00:00:00 2001 From: Brandon Fish Date: Sun, 17 Mar 2019 09:38:43 -0500 Subject: [PATCH 073/100] 
Add dynasm tests to the Makefile --- Makefile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Makefile b/Makefile index a51d667894a..a4ecfca3638 100644 --- a/Makefile +++ b/Makefile @@ -37,12 +37,14 @@ test: cargo test --all --exclude wasmer-runtime-c-api --exclude wasmer-emscripten --exclude wasmer-spectests -- $(runargs) # cargo test --all --exclude wasmer-emscripten -- --test-threads=1 $(runargs) cargo test --manifest-path lib/spectests/Cargo.toml --features clif + cargo test --manifest-path lib/spectests/Cargo.toml --features dynasm cargo test --manifest-path lib/spectests/Cargo.toml --features llvm cargo build -p wasmer-runtime-c-api cargo test -p wasmer-runtime-c-api -- --nocapture test-emscripten: cargo test --manifest-path lib/emscripten/Cargo.toml --features clif -- --test-threads=1 $(runargs) + cargo test --manifest-path lib/emscripten/Cargo.toml --features dynasm -- --test-threads=1 $(runargs) cargo test --manifest-path lib/emscripten/Cargo.toml --features llvm -- --test-threads=1 $(runargs) release: From 99faa798694fe09a44bbb315ee5e7a65c787a88e Mon Sep 17 00:00:00 2001 From: losfair Date: Mon, 18 Mar 2019 00:31:36 +0800 Subject: [PATCH 074/100] Run cargo fmt. --- lib/dynasm-backend/Cargo.toml | 2 +- lib/dynasm-backend/src/codegen.rs | 9 +- lib/dynasm-backend/src/codegen_x64.rs | 284 ++++++++++++++------------ lib/dynasm-backend/src/lib.rs | 14 +- lib/dynasm-backend/src/parse.rs | 7 +- 5 files changed, 167 insertions(+), 149 deletions(-) diff --git a/lib/dynasm-backend/Cargo.toml b/lib/dynasm-backend/Cargo.toml index 428a90f4760..ea03ff8cae2 100644 --- a/lib/dynasm-backend/Cargo.toml +++ b/lib/dynasm-backend/Cargo.toml @@ -10,7 +10,7 @@ edition = "2018" [dependencies] wasmer-runtime-core = { path = "../runtime-core" } wasmparser = "0.28.0" -dynasm = "0.3.0" +dynasm = "0.3.1" dynasmrt = "0.3.1" lazy_static = "1.2.0" byteorder = "1" diff --git a/lib/dynasm-backend/src/codegen.rs b/lib/dynasm-backend/src/codegen.rs index 59b7b470697..798dea114c0 100644 --- a/lib/dynasm-backend/src/codegen.rs +++ b/lib/dynasm-backend/src/codegen.rs @@ -1,8 +1,8 @@ use wasmer_runtime_core::{ - backend::{ProtectedCaller, FuncResolver}, + backend::{FuncResolver, ProtectedCaller}, + module::ModuleInfo, structures::Map, types::{FuncIndex, FuncSig, SigIndex}, - module::ModuleInfo, }; use wasmparser::{Operator, Type as WpType}; @@ -10,10 +10,7 @@ pub trait ModuleCodeGenerator Result<(), CodegenError>; fn next_function(&mut self) -> Result<&mut FCG, CodegenError>; fn finalize(self, module_info: &ModuleInfo) -> Result<(PC, FR), CodegenError>; - fn feed_signatures( - &mut self, - signatures: Map, - ) -> Result<(), CodegenError>; + fn feed_signatures(&mut self, signatures: Map) -> Result<(), CodegenError>; fn feed_function_signatures( &mut self, assoc: Map, diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index be5249c8234..3b28370e3f3 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -2,6 +2,7 @@ use super::codegen::*; use super::stack::{ ControlFrame, ControlStack, IfElseState, ScratchRegister, ValueInfo, ValueLocation, ValueStack, }; +use crate::protect_unix; use byteorder::{ByteOrder, LittleEndian}; use dynasmrt::{ x64::Assembler, AssemblyOffset, DynamicLabel, DynasmApi, DynasmLabelApi, ExecutableBuffer, @@ -12,18 +13,17 @@ use std::{any::Any, collections::HashMap, sync::Arc}; use wasmer_runtime_core::{ backend::{FuncResolver, ProtectedCaller, Token, UserTrapper}, error::{RuntimeError, RuntimeResult}, + 
memory::MemoryType, module::{ModuleInfo, ModuleInner}, structures::{Map, TypedIndex}, types::{ - FuncIndex, FuncSig, LocalFuncIndex, LocalGlobalIndex, MemoryIndex, SigIndex, - Type, Value, LocalMemoryIndex, ImportedMemoryIndex, LocalOrImport, + FuncIndex, FuncSig, ImportedMemoryIndex, LocalFuncIndex, LocalGlobalIndex, + LocalMemoryIndex, LocalOrImport, MemoryIndex, SigIndex, Type, Value, }, - memory::MemoryType, units::Pages, - vm::{self, ImportBacking, LocalGlobal, LocalTable, LocalMemory}, + vm::{self, ImportBacking, LocalGlobal, LocalMemory, LocalTable}, }; use wasmparser::{Operator, Type as WpType}; -use crate::protect_unix; thread_local! { static CURRENT_EXECUTION_CONTEXT: RefCell> = RefCell::new(Vec::new()); @@ -262,9 +262,12 @@ pub struct X64RuntimeResolver { } impl X64ExecutionContext { - fn get_runtime_resolver(&self, module_info: &ModuleInfo) -> Result { + fn get_runtime_resolver( + &self, + module_info: &ModuleInfo, + ) -> Result { let mut assembler = Assembler::new().unwrap(); - let mut offsets: Vec = vec! []; + let mut offsets: Vec = vec![]; for i in self.func_import_count..self.function_pointers.len() { offsets.push(assembler.offset()); @@ -272,12 +275,15 @@ impl X64ExecutionContext { &mut assembler, module_info, self.function_pointers[i], - self.signatures[self.function_signatures[FuncIndex::new(i)]].params().len(), + self.signatures[self.function_signatures[FuncIndex::new(i)]] + .params() + .len(), )?; } let code = assembler.finalize().unwrap(); - let local_pointers: Vec = offsets.iter().map(|x| FuncPtr(code.ptr(*x) as _)).collect(); + let local_pointers: Vec = + offsets.iter().map(|x| FuncPtr(code.ptr(*x) as _)).collect(); Ok(X64RuntimeResolver { _code: code, @@ -292,9 +298,7 @@ impl FuncResolver for X64RuntimeResolver { _module: &ModuleInner, _local_func_index: LocalFuncIndex, ) -> Option> { - NonNull::new( - self.local_pointers[_local_func_index.index() as usize].0 as *mut vm::Func, - ) + NonNull::new(self.local_pointers[_local_func_index.index() as usize].0 as *mut vm::Func) } } @@ -513,13 +517,22 @@ impl X64ModuleCodeGenerator { } } -impl ModuleCodeGenerator for X64ModuleCodeGenerator { +impl ModuleCodeGenerator + for X64ModuleCodeGenerator +{ fn check_precondition(&mut self, module_info: &ModuleInfo) -> Result<(), CodegenError> { - for mem in module_info.memories.iter().map(|(_, v)| v).chain(module_info.imported_memories.iter().map(|(_, v)| &v.1)) { + for mem in module_info + .memories + .iter() + .map(|(_, v)| v) + .chain(module_info.imported_memories.iter().map(|(_, v)| &v.1)) + { match mem.memory_type() { - MemoryType::Dynamic => return Err(CodegenError { - message: "dynamic memory isn't supported yet" - }), + MemoryType::Dynamic => { + return Err(CodegenError { + message: "dynamic memory isn't supported yet", + }) + } _ => {} } } @@ -574,7 +587,10 @@ impl ModuleCodeGenerator Result<(X64ExecutionContext, X64RuntimeResolver), CodegenError> { + fn finalize( + mut self, + module_info: &ModuleInfo, + ) -> Result<(X64ExecutionContext, X64RuntimeResolver), CodegenError> { let (assembler, mut br_table_data) = match self.functions.last_mut() { Some(x) => (x.assembler.take().unwrap(), x.br_table_data.take().unwrap()), None => { @@ -646,10 +662,7 @@ impl ModuleCodeGenerator, - ) -> Result<(), CodegenError> { + fn feed_signatures(&mut self, signatures: Map) -> Result<(), CodegenError> { self.signatures = Some(Arc::new(signatures)); Ok(()) } @@ -703,11 +716,15 @@ impl X64FunctionCode { Ok(()) } - fn emit_reinterpret(value_stack: &mut ValueStack, in_ty: WpType, out_ty: WpType) 
-> Result<(), CodegenError> { + fn emit_reinterpret( + value_stack: &mut ValueStack, + in_ty: WpType, + out_ty: WpType, + ) -> Result<(), CodegenError> { let val = value_stack.pop()?; if val.ty != in_ty { return Err(CodegenError { - message: "reinterpret type mismatch" + message: "reinterpret type mismatch", }); } value_stack.push(out_ty); @@ -1334,7 +1351,12 @@ impl X64FunctionCode { Ok(()) } - fn emit_managed_call_trampoline(assembler: &mut Assembler, info: &ModuleInfo, target: FuncPtr, num_params: usize) -> Result<(), CodegenError> { + fn emit_managed_call_trampoline( + assembler: &mut Assembler, + info: &ModuleInfo, + target: FuncPtr, + num_params: usize, + ) -> Result<(), CodegenError> { dynasm!( assembler ; push rbp @@ -1368,7 +1390,6 @@ impl X64FunctionCode { } } - dynasm!( assembler ; mov r8, rdi // vmctx @@ -1390,7 +1411,7 @@ impl X64FunctionCode { true } else if info.imported_memories.len() > 0 { if info.memories.len() != 0 || info.imported_memories.len() != 1 { - return Err(CodegenError{ + return Err(CodegenError { message: "only one linear memory is supported", }); } @@ -1879,7 +1900,14 @@ impl FunctionCodeGenerator for X64FunctionCode { Operator::Else => { // We are in a reachable true branch if self.unreachable_depth == 1 { - if let Some(IfElseState::If(_)) = self.control_stack.as_ref().unwrap().frames.last().map(|x| x.if_else) { + if let Some(IfElseState::If(_)) = self + .control_stack + .as_ref() + .unwrap() + .frames + .last() + .map(|x| x.if_else) + { self.unreachable_depth -= 1; } } @@ -2324,7 +2352,7 @@ impl FunctionCodeGenerator for X64FunctionCode { ; shl Rd(left as u8), cl ) }); - } + }, )?; } Operator::I32ShrU => { @@ -2338,7 +2366,7 @@ impl FunctionCodeGenerator for X64FunctionCode { ; shr Rd(left as u8), cl ) }); - } + }, )?; } Operator::I32ShrS => { @@ -2352,7 +2380,7 @@ impl FunctionCodeGenerator for X64FunctionCode { ; sar Rd(left as u8), cl ) }); - } + }, )?; } Operator::I32Rotl => { @@ -2366,7 +2394,7 @@ impl FunctionCodeGenerator for X64FunctionCode { ; rol Rd(left as u8), cl ) }); - } + }, )?; } Operator::I32Rotr => { @@ -2380,7 +2408,7 @@ impl FunctionCodeGenerator for X64FunctionCode { ; ror Rd(left as u8), cl ) }); - } + }, )?; } // Comparison operators. @@ -2760,7 +2788,7 @@ impl FunctionCodeGenerator for X64FunctionCode { ; shl Rq(left as u8), cl ) }); - } + }, )?; } Operator::I64ShrU => { @@ -2774,7 +2802,7 @@ impl FunctionCodeGenerator for X64FunctionCode { ; shr Rq(left as u8), cl ) }); - } + }, )?; } Operator::I64ShrS => { @@ -2788,7 +2816,7 @@ impl FunctionCodeGenerator for X64FunctionCode { ; sar Rq(left as u8), cl ) }); - } + }, )?; } Operator::I64Rotl => { @@ -2802,7 +2830,7 @@ impl FunctionCodeGenerator for X64FunctionCode { ; rol Rq(left as u8), cl ) }); - } + }, )?; } Operator::I64Rotr => { @@ -2816,7 +2844,7 @@ impl FunctionCodeGenerator for X64FunctionCode { ; ror Rq(left as u8), cl ) }); - } + }, )?; } // Comparison operators. 
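The emit_unop and emit_binop helpers invoked throughout these hunks are defined earlier in codegen_x64.rs and are untouched by this series; only the closures passed to them change. A minimal, self-contained model of the contract their call sites rely on (simplified types and register handling; an illustration, not the real implementation):

    // Simplified model: a unary operator pops one value of `in_ty`, lets the
    // closure emit machine code against the scratch register holding it, and
    // pushes a value of `out_ty` in its place.
    #[derive(Clone, Copy, Debug, PartialEq)]
    enum WpType { I32, I64, F32, F64 }

    struct ValueStack { values: Vec<WpType> }

    fn emit_unop(
        stack: &mut ValueStack,
        emit: impl FnOnce(u8), // receives the register id holding the operand
        in_ty: WpType,
        out_ty: WpType,
    ) -> Result<(), String> {
        match stack.values.pop() {
            Some(ty) if ty == in_ty => {
                emit(0); // register picked by the backend's scratch allocation
                stack.values.push(out_ty);
                Ok(())
            }
            Some(ty) => Err(format!("type mismatch: expected {:?}, got {:?}", in_ty, ty)),
            None => Err("value stack is empty".to_string()),
        }
    }

The real helpers additionally track where each value lives (register vs. machine stack) via ValueLocation, which the sketch omits; the underscore renames above simply mark closure parameters that are never used.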
@@ -3724,19 +3752,19 @@ impl FunctionCodeGenerator for X64FunctionCode { )?; } /* - 0: 48 85 ff test %rdi,%rdi - 3: 78 0b js 10 - 5: c4 e1 fb 2a c7 vcvtsi2sd %rdi,%xmm0,%xmm0 - a: c3 retq - b: 0f 1f 44 00 00 nopl 0x0(%rax,%rax,1) - 10: 48 89 f8 mov %rdi,%rax - 13: 83 e7 01 and $0x1,%edi - 16: 48 d1 e8 shr %rax - 19: 48 09 f8 or %rdi,%rax - 1c: c4 e1 fb 2a c0 vcvtsi2sd %rax,%xmm0,%xmm0 - 21: c5 fb 58 c0 vaddsd %xmm0,%xmm0,%xmm0 - 25: c3 retq - */ + 0: 48 85 ff test %rdi,%rdi + 3: 78 0b js 10 + 5: c4 e1 fb 2a c7 vcvtsi2sd %rdi,%xmm0,%xmm0 + a: c3 retq + b: 0f 1f 44 00 00 nopl 0x0(%rax,%rax,1) + 10: 48 89 f8 mov %rdi,%rax + 13: 83 e7 01 and $0x1,%edi + 16: 48 d1 e8 shr %rax + 19: 48 09 f8 or %rdi,%rax + 1c: c4 e1 fb 2a c0 vcvtsi2sd %rax,%xmm0,%xmm0 + 21: c5 fb 58 c0 vaddsd %xmm0,%xmm0,%xmm0 + 25: c3 retq + */ Operator::F32ConvertUI64 => { Self::emit_unop( assembler, @@ -4119,7 +4147,7 @@ impl FunctionCodeGenerator for X64FunctionCode { ); }, WpType::F32, - WpType::F32 + WpType::F32, )?; } Operator::F32Abs => { @@ -4133,7 +4161,7 @@ impl FunctionCodeGenerator for X64FunctionCode { ); }, WpType::F32, - WpType::F32 + WpType::F32, )?; } Operator::F32Neg => { @@ -4147,7 +4175,7 @@ impl FunctionCodeGenerator for X64FunctionCode { ); }, WpType::F32, - WpType::F32 + WpType::F32, )?; } Operator::F32Nearest => { @@ -4163,7 +4191,7 @@ impl FunctionCodeGenerator for X64FunctionCode { ); }, WpType::F32, - WpType::F32 + WpType::F32, )?; } Operator::F32Floor => { @@ -4179,7 +4207,7 @@ impl FunctionCodeGenerator for X64FunctionCode { ); }, WpType::F32, - WpType::F32 + WpType::F32, )?; } Operator::F32Ceil => { @@ -4195,7 +4223,7 @@ impl FunctionCodeGenerator for X64FunctionCode { ); }, WpType::F32, - WpType::F32 + WpType::F32, )?; } Operator::F32Trunc => { @@ -4211,7 +4239,7 @@ impl FunctionCodeGenerator for X64FunctionCode { ); }, WpType::F32, - WpType::F32 + WpType::F32, )?; } Operator::I32TruncUF32 => { @@ -4219,12 +4247,7 @@ impl FunctionCodeGenerator for X64FunctionCode { assembler, &mut self.value_stack, |assembler, _value_stack, reg| { - Self::emit_f32_int_conv_check( - assembler, - reg, - -1.0, - 4294967296.0, - ); + Self::emit_f32_int_conv_check(assembler, reg, -1.0, 4294967296.0); dynasm!( assembler ; movd xmm1, Rd(reg as u8) @@ -4233,7 +4256,7 @@ impl FunctionCodeGenerator for X64FunctionCode { ); }, WpType::F32, - WpType::I32 + WpType::I32, )?; } Operator::I32TruncSF32 => { @@ -4241,12 +4264,7 @@ impl FunctionCodeGenerator for X64FunctionCode { assembler, &mut self.value_stack, |assembler, _value_stack, reg| { - Self::emit_f32_int_conv_check( - assembler, - reg, - -2147483904.0, - 2147483648.0 - ); + Self::emit_f32_int_conv_check(assembler, reg, -2147483904.0, 2147483648.0); dynasm!( assembler ; movd xmm1, Rd(reg as u8) @@ -4254,7 +4272,7 @@ impl FunctionCodeGenerator for X64FunctionCode { ); }, WpType::F32, - WpType::I32 + WpType::I32, )?; } Operator::I64TruncUF32 => { @@ -4262,12 +4280,7 @@ impl FunctionCodeGenerator for X64FunctionCode { assembler, &mut self.value_stack, |assembler, _value_stack, reg| { - Self::emit_f32_int_conv_check( - assembler, - reg, - -1.0, - 18446744073709551616.0, - ); + Self::emit_f32_int_conv_check(assembler, reg, -1.0, 18446744073709551616.0); /* LCPI0_0: .long 1593835520 ## float 9.22337203E+18 @@ -4300,7 +4313,7 @@ impl FunctionCodeGenerator for X64FunctionCode { ); }, WpType::F32, - WpType::I64 + WpType::I64, )?; } Operator::I64TruncSF32 => { @@ -4321,7 +4334,7 @@ impl FunctionCodeGenerator for X64FunctionCode { ); }, WpType::F32, - WpType::I64 + WpType::I64, )?; 
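The unsigned 64-bit sequences introduced by patches 071/072 (and re-indented here) follow two standard x86-64 idioms, because SSE2 only provides signed conversions. Restated as Rust, as a sketch that assumes the preceding emit_f32_int_conv_check has already trapped on NaN and on values outside (-1, 2^64); the f64 sequences in the next hunks are analogous:

    // I64TruncUF32: values below 2^63 go through a plain signed cvttss2si;
    // values at or above 2^63 are biased down by 2^63, truncated as signed,
    // and the top bit is restored with an xor. The ucomiss + cmovae pair
    // selects between the two results.
    fn trunc_f32_to_u64(x: f32) -> u64 {
        let two_pow_63 = f32::from_bits(0x5F00_0000); // the 1593835520 constant above
        if x >= two_pow_63 {
            ((x - two_pow_63) as i64 as u64) ^ 0x8000_0000_0000_0000
        } else {
            x as i64 as u64
        }
    }

    // F32ConvertUI64 ("do_convert" path): halve the value while folding the
    // dropped bit back in (round to odd), convert as signed, then double.
    // This keeps the final rounding correct for inputs with the top bit set.
    fn u64_to_f32(x: u64) -> f32 {
        if (x as i64) >= 0 {
            x as i64 as f32
        } else {
            let halved = (x >> 1) | (x & 1);
            (halved as i64 as f32) * 2.0
        }
    }

The 1593835520 constant is simply 2^63 expressed as an f32 bit pattern (0x5F000000); patch 072 additionally parks r15 in xmm5 around the sequence so the temporary register is preserved.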
} Operator::F64Add => { @@ -4570,7 +4583,7 @@ impl FunctionCodeGenerator for X64FunctionCode { ); }, WpType::F64, - WpType::F64 + WpType::F64, )?; } Operator::F64Abs => { @@ -4588,7 +4601,7 @@ impl FunctionCodeGenerator for X64FunctionCode { ); }, WpType::F64, - WpType::F64 + WpType::F64, )?; } Operator::F64Neg => { @@ -4602,7 +4615,7 @@ impl FunctionCodeGenerator for X64FunctionCode { ); }, WpType::F64, - WpType::F64 + WpType::F64, )?; } Operator::F64Nearest => { @@ -4618,7 +4631,7 @@ impl FunctionCodeGenerator for X64FunctionCode { ); }, WpType::F64, - WpType::F64 + WpType::F64, )?; } Operator::F64Floor => { @@ -4634,7 +4647,7 @@ impl FunctionCodeGenerator for X64FunctionCode { ); }, WpType::F64, - WpType::F64 + WpType::F64, )?; } Operator::F64Ceil => { @@ -4650,7 +4663,7 @@ impl FunctionCodeGenerator for X64FunctionCode { ); }, WpType::F64, - WpType::F64 + WpType::F64, )?; } Operator::F64Trunc => { @@ -4666,7 +4679,7 @@ impl FunctionCodeGenerator for X64FunctionCode { ); }, WpType::F64, - WpType::F64 + WpType::F64, )?; } Operator::I32TruncUF64 => { @@ -4674,12 +4687,7 @@ impl FunctionCodeGenerator for X64FunctionCode { assembler, &mut self.value_stack, |assembler, _value_stack, reg| { - Self::emit_f64_int_conv_check( - assembler, - reg, - -1.0, - 4294967296.0, - ); + Self::emit_f64_int_conv_check(assembler, reg, -1.0, 4294967296.0); dynasm!( assembler @@ -4689,7 +4697,7 @@ impl FunctionCodeGenerator for X64FunctionCode { ); }, WpType::F64, - WpType::I32 + WpType::I32, )?; } Operator::I32TruncSF64 => { @@ -4697,12 +4705,7 @@ impl FunctionCodeGenerator for X64FunctionCode { assembler, &mut self.value_stack, |assembler, _value_stack, reg| { - Self::emit_f64_int_conv_check( - assembler, - reg, - -2147483649.0, - 2147483648.0, - ); + Self::emit_f64_int_conv_check(assembler, reg, -2147483649.0, 2147483648.0); dynasm!( assembler @@ -4711,7 +4714,7 @@ impl FunctionCodeGenerator for X64FunctionCode { ); }, WpType::F64, - WpType::I32 + WpType::I32, )?; } Operator::I64TruncUF64 => { @@ -4719,12 +4722,7 @@ impl FunctionCodeGenerator for X64FunctionCode { assembler, &mut self.value_stack, |assembler, _value_stack, reg| { - Self::emit_f64_int_conv_check( - assembler, - reg, - -1.0, - 18446744073709551616.0, - ); + Self::emit_f64_int_conv_check(assembler, reg, -1.0, 18446744073709551616.0); /* LCPI0_0: @@ -4759,7 +4757,7 @@ impl FunctionCodeGenerator for X64FunctionCode { ); }, WpType::F64, - WpType::I64 + WpType::I64, )?; } Operator::I64TruncSF64 => { @@ -4781,7 +4779,7 @@ impl FunctionCodeGenerator for X64FunctionCode { ); }, WpType::F64, - WpType::I64 + WpType::I64, )?; } Operator::Nop => {} @@ -4794,7 +4792,9 @@ impl FunctionCodeGenerator for X64FunctionCode { //MemoryType::Dynamic => self.native_trampolines.memory_size_dynamic_local, MemoryType::Dynamic => unimplemented!(), MemoryType::Static => self.native_trampolines.memory_size_static_local, - MemoryType::SharedStatic => self.native_trampolines.memory_size_shared_local, + MemoryType::SharedStatic => { + self.native_trampolines.memory_size_shared_local + } } } LocalOrImport::Import(import_mem_index) => { @@ -4803,17 +4803,13 @@ impl FunctionCodeGenerator for X64FunctionCode { //MemoryType::Dynamic => self.native_trampolines.memory_size_dynamic_import, MemoryType::Dynamic => unimplemented!(), MemoryType::Static => self.native_trampolines.memory_size_static_import, - MemoryType::SharedStatic => self.native_trampolines.memory_size_shared_import, + MemoryType::SharedStatic => { + self.native_trampolines.memory_size_shared_import + } } } }; - 
Self::emit_call_raw( - assembler, - &mut self.value_stack, - label, - &[], - &[WpType::I32] - )?; + Self::emit_call_raw(assembler, &mut self.value_stack, label, &[], &[WpType::I32])?; } Operator::MemoryGrow { reserved } => { let memory_index = MemoryIndex::new(reserved as usize); @@ -4824,7 +4820,9 @@ impl FunctionCodeGenerator for X64FunctionCode { //MemoryType::Dynamic => self.native_trampolines.memory_grow_dynamic_local, MemoryType::Dynamic => unimplemented!(), MemoryType::Static => self.native_trampolines.memory_grow_static_local, - MemoryType::SharedStatic => self.native_trampolines.memory_grow_shared_local, + MemoryType::SharedStatic => { + self.native_trampolines.memory_grow_shared_local + } } } LocalOrImport::Import(import_mem_index) => { @@ -4833,7 +4831,9 @@ impl FunctionCodeGenerator for X64FunctionCode { //MemoryType::Dynamic => self.native_trampolines.memory_grow_dynamic_import, MemoryType::Dynamic => unimplemented!(), MemoryType::Static => self.native_trampolines.memory_grow_static_import, - MemoryType::SharedStatic => self.native_trampolines.memory_grow_shared_import, + MemoryType::SharedStatic => { + self.native_trampolines.memory_grow_shared_import + } } } }; @@ -4842,12 +4842,12 @@ impl FunctionCodeGenerator for X64FunctionCode { &mut self.value_stack, label, &[WpType::I32], - &[WpType::I32] + &[WpType::I32], )?; } _ => { panic!("{:?}", op); - }, + } } Ok(()) } @@ -4927,7 +4927,7 @@ unsafe extern "C" fn invoke_import( #[derive(Copy, Clone, Debug)] enum CallIndirectLocalOrImport { Local, - Import + Import, } unsafe extern "C" fn call_indirect( @@ -4945,7 +4945,7 @@ unsafe extern "C" fn call_indirect( let table: &LocalTable = match local_or_import { CallIndirectLocalOrImport::Local => &*(*(*vmctx).tables), CallIndirectLocalOrImport::Import => &*(*(*vmctx).imported_tables), - } ; + }; if elem_index >= table.count as usize { eprintln!("element index out of bounds"); protect_unix::trigger_trap(); @@ -5005,11 +5005,19 @@ unsafe extern "C" fn _memory_size( ) -> u64 { use wasmer_runtime_core::vmcalls; let ret = match op { - MemoryKind::DynamicLocal => vmcalls::local_dynamic_memory_size(&*vmctx, LocalMemoryIndex::new(index)), - MemoryKind::StaticLocal => vmcalls::local_static_memory_size(&*vmctx, LocalMemoryIndex::new(index)), + MemoryKind::DynamicLocal => { + vmcalls::local_dynamic_memory_size(&*vmctx, LocalMemoryIndex::new(index)) + } + MemoryKind::StaticLocal => { + vmcalls::local_static_memory_size(&*vmctx, LocalMemoryIndex::new(index)) + } MemoryKind::SharedLocal => unreachable!(), - MemoryKind::DynamicImport => vmcalls::imported_dynamic_memory_size(&*vmctx, ImportedMemoryIndex::new(index)), - MemoryKind::StaticImport => vmcalls::imported_static_memory_size(&*vmctx, ImportedMemoryIndex::new(index)), + MemoryKind::DynamicImport => { + vmcalls::imported_dynamic_memory_size(&*vmctx, ImportedMemoryIndex::new(index)) + } + MemoryKind::StaticImport => { + vmcalls::imported_static_memory_size(&*vmctx, ImportedMemoryIndex::new(index)) + } MemoryKind::SharedImport => unreachable!(), }; ret.0 as u32 as u64 @@ -5027,11 +5035,23 @@ unsafe extern "C" fn _memory_grow( assert_eq!(stack_base as usize - stack_top as usize, 8); let pages = Pages(*(stack_top as *mut u32)); let ret = match op { - MemoryKind::DynamicLocal => vmcalls::local_dynamic_memory_grow(&mut *vmctx, LocalMemoryIndex::new(index), pages), - MemoryKind::StaticLocal => vmcalls::local_static_memory_grow(&mut *vmctx, LocalMemoryIndex::new(index), pages), + MemoryKind::DynamicLocal => { + vmcalls::local_dynamic_memory_grow(&mut 
*vmctx, LocalMemoryIndex::new(index), pages) + } + MemoryKind::StaticLocal => { + vmcalls::local_static_memory_grow(&mut *vmctx, LocalMemoryIndex::new(index), pages) + } MemoryKind::SharedLocal => unreachable!(), - MemoryKind::DynamicImport => vmcalls::imported_dynamic_memory_grow(&mut *vmctx, ImportedMemoryIndex::new(index), pages), - MemoryKind::StaticImport => vmcalls::imported_static_memory_grow(&mut *vmctx, ImportedMemoryIndex::new(index), pages), + MemoryKind::DynamicImport => vmcalls::imported_dynamic_memory_grow( + &mut *vmctx, + ImportedMemoryIndex::new(index), + pages, + ), + MemoryKind::StaticImport => vmcalls::imported_static_memory_grow( + &mut *vmctx, + ImportedMemoryIndex::new(index), + pages, + ), MemoryKind::SharedImport => unreachable!(), }; ret as u32 as u64 diff --git a/lib/dynasm-backend/src/lib.rs b/lib/dynasm-backend/src/lib.rs index 377f1a6738d..bb492ba09d1 100644 --- a/lib/dynasm-backend/src/lib.rs +++ b/lib/dynasm-backend/src/lib.rs @@ -19,8 +19,8 @@ extern crate byteorder; mod codegen; mod codegen_x64; mod parse; -mod stack; mod protect_unix; +mod stack; use crate::codegen::{CodegenError, ModuleCodeGenerator}; use crate::parse::LoadError; @@ -30,9 +30,7 @@ use wasmer_runtime_core::{ cache::{Artifact, Error as CacheError}, error::{CompileError, CompileResult}, module::{ModuleInfo, ModuleInner}, - types::{ - LocalFuncIndex, - }, + types::LocalFuncIndex, vm, }; @@ -43,7 +41,9 @@ impl CacheGen for Placeholder { _module: &ModuleInner, ) -> Result<(Box, Box<[u8]>, Memory), CacheError> { // unimplemented!() - Err(CacheError::Unknown("the dynasm backend doesn't support caching yet".to_string())) + Err(CacheError::Unknown( + "the dynasm backend doesn't support caching yet".to_string(), + )) } } @@ -78,7 +78,9 @@ impl Compiler for SinglePassCompiler { } unsafe fn from_cache(&self, _artifact: Artifact, _: Token) -> Result { - Err(CacheError::Unknown("the dynasm backend doesn't support caching yet".to_string())) + Err(CacheError::Unknown( + "the dynasm backend doesn't support caching yet".to_string(), + )) // unimplemented!("the dynasm backend doesn't support caching yet") } } diff --git a/lib/dynasm-backend/src/parse.rs b/lib/dynasm-backend/src/parse.rs index 5c15fff65a2..7e918e9426e 100644 --- a/lib/dynasm-backend/src/parse.rs +++ b/lib/dynasm-backend/src/parse.rs @@ -1,6 +1,6 @@ use crate::codegen::{CodegenError, FunctionCodeGenerator, ModuleCodeGenerator}; use wasmer_runtime_core::{ - backend::{Backend, ProtectedCaller, FuncResolver}, + backend::{Backend, FuncResolver, ProtectedCaller}, module::{ DataInitializer, ExportIndex, ImportName, ModuleInfo, StringTable, StringTableBuilder, TableInitializer, @@ -14,9 +14,8 @@ use wasmer_runtime_core::{ units::Pages, }; use wasmparser::{ - BinaryReaderError, Data, DataKind, Element, ElementKind, Export, - ExternalKind, FuncType, Import, ImportSectionEntryType, InitExpr, ModuleReader, Operator, - SectionCode, Type as WpType, + BinaryReaderError, Data, DataKind, Element, ElementKind, Export, ExternalKind, FuncType, + Import, ImportSectionEntryType, InitExpr, ModuleReader, Operator, SectionCode, Type as WpType, WasmDecoder, }; From 6c40ea1cb5c682cd680611d17c2faf5bc6e7311c Mon Sep 17 00:00:00 2001 From: losfair Date: Mon, 18 Mar 2019 00:35:55 +0800 Subject: [PATCH 075/100] Cargo fmt more files. 
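The _memory_size and _memory_grow trampolines reformatted at the end of the previous patch receive their WebAssembly arguments through the stack_top/stack_base pointer pair. Judging from the assert_eq!(stack_base as usize - stack_top as usize, 8) and the debugging code removed earlier in the series, each argument occupies one 8-byte slot; a small sketch of that convention (the helper name is illustrative, not part of the patch):

    // Walk the argument area the generated code hands to a raw trampoline:
    // one 8-byte slot per wasm argument, laid out from stack_top upwards.
    unsafe fn read_stack_args(stack_top: *mut u8, stack_base: *mut u8) -> Vec<u64> {
        let n_args = (stack_base as usize - stack_top as usize) / 8;
        (0..n_args)
            .map(|i| *((stack_top as usize + i * 8) as *const u64))
            .collect()
    }

_memory_grow then reads its single page-delta argument as the low 32 bits of that one slot before dispatching to the matching vmcalls function.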
--- lib/runtime-core/src/backing.rs | 7 ++++++- lib/runtime-core/src/error.rs | 7 ++++--- lib/runtime-core/src/types.rs | 5 ++++- lib/runtime/src/lib.rs | 2 +- src/webassembly.rs | 5 ++++- 5 files changed, 19 insertions(+), 7 deletions(-) diff --git a/lib/runtime-core/src/backing.rs b/lib/runtime-core/src/backing.rs index 7a1e7239a90..2c9630764e4 100644 --- a/lib/runtime-core/src/backing.rs +++ b/lib/runtime-core/src/backing.rs @@ -230,7 +230,12 @@ impl LocalBacking { } }; - elements[init_base + i] = vm::Anyfunc { func, ctx, sig_id, func_index: Some(func_index) }; + elements[init_base + i] = vm::Anyfunc { + func, + ctx, + sig_id, + func_index: Some(func_index), + }; } }); } diff --git a/lib/runtime-core/src/error.rs b/lib/runtime-core/src/error.rs index 124b2d23aaa..ecb5c020768 100644 --- a/lib/runtime-core/src/error.rs +++ b/lib/runtime-core/src/error.rs @@ -1,10 +1,11 @@ use crate::types::{ - FuncSig, GlobalDescriptor, MemoryDescriptor, MemoryIndex, TableDescriptor, TableIndex, Type, Value, + FuncSig, GlobalDescriptor, MemoryDescriptor, MemoryIndex, TableDescriptor, TableIndex, Type, + Value, }; -use std::sync::Arc; -use wasmparser::BinaryReaderError; use core::borrow::Borrow; use std::any::Any; +use std::sync::Arc; +use wasmparser::BinaryReaderError; pub type Result = std::result::Result; pub type CompileResult = std::result::Result; diff --git a/lib/runtime-core/src/types.rs b/lib/runtime-core/src/types.rs index 3b641e043a2..fb30c6276f1 100644 --- a/lib/runtime-core/src/types.rs +++ b/lib/runtime-core/src/types.rs @@ -1,5 +1,8 @@ use crate::error::{CompileError, CompileResult}; -use crate::{memory::MemoryType, module::ModuleInfo, module::ModuleInner, structures::TypedIndex, units::Pages}; +use crate::{ + memory::MemoryType, module::ModuleInfo, module::ModuleInner, structures::TypedIndex, + units::Pages, +}; use std::{borrow::Cow, mem}; /// Represents a WebAssembly type. diff --git a/lib/runtime/src/lib.rs b/lib/runtime/src/lib.rs index 4784083347e..13557d38378 100644 --- a/lib/runtime/src/lib.rs +++ b/lib/runtime/src/lib.rs @@ -160,7 +160,7 @@ fn default_compiler() -> &'static dyn Compiler { #[cfg(not(feature = "llvm"))] use wasmer_dynasm_backend::SinglePassCompiler as DefaultCompiler; - // use wasmer_clif_backend::CraneliftCompiler as DefaultCompiler; // TODO Fix default + // use wasmer_clif_backend::CraneliftCompiler as DefaultCompiler; // TODO Fix default lazy_static! { static ref DEFAULT_COMPILER: DefaultCompiler = { DefaultCompiler::new() }; diff --git a/src/webassembly.rs b/src/webassembly.rs index 5a603893b56..e448ce5c6f1 100644 --- a/src/webassembly.rs +++ b/src/webassembly.rs @@ -87,7 +87,10 @@ pub fn run_instance( if is_emscripten_module(module) { run_emscripten_instance(module, instance, path, args)?; } else { - let args: Vec = args.into_iter().map(|x| Value::I32(x.parse().unwrap())).collect(); + let args: Vec = args + .into_iter() + .map(|x| Value::I32(x.parse().unwrap())) + .collect(); println!("{:?}", instance.call("main", &args)?); }; From 4ca27b68b0904fe561fb617f10b9c99efa8dc1dc Mon Sep 17 00:00:00 2001 From: losfair Date: Mon, 18 Mar 2019 00:48:50 +0800 Subject: [PATCH 076/100] Manually fix lint errors. 
--- lib/dynasm-backend/src/codegen_x64.rs | 34 +++++++++++++-------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index 3b28370e3f3..374a3f97104 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -531,7 +531,7 @@ impl ModuleCodeGenerator { return Err(CodegenError { message: "dynamic memory isn't supported yet", - }) + }); } _ => {} } @@ -596,7 +596,7 @@ impl ModuleCodeGenerator { return Err(CodegenError { message: "no function", - }) + }); } }; let output = assembler.finalize().unwrap(); @@ -620,7 +620,7 @@ impl ModuleCodeGenerator { return Err(CodegenError { message: "label not found", - }) + }); } }; let offset = match offset { @@ -628,7 +628,7 @@ impl ModuleCodeGenerator { return Err(CodegenError { message: "offset is none", - }) + }); } }; out_labels.push(FuncPtr(output.ptr(*offset) as _)); @@ -644,7 +644,7 @@ impl ModuleCodeGenerator { return Err(CodegenError { message: "no signatures", - }) + }); } }, function_pointers: out_labels, @@ -653,7 +653,7 @@ impl ModuleCodeGenerator { return Err(CodegenError { message: "no function signatures", - }) + }); } }, }; @@ -681,7 +681,7 @@ impl ModuleCodeGenerator { return Err(CodegenError { message: "got function import after code", - }) + }); } }; let id = labels.len(); @@ -1036,7 +1036,7 @@ impl X64FunctionCode { None => { return Err(CodegenError { message: "no value", - }) + }); } }; match val.location { @@ -1136,7 +1136,7 @@ impl X64FunctionCode { _ => { return Err(CodegenError { message: "more than one block returns are not yet supported", - }) + }); } }; @@ -1173,7 +1173,7 @@ impl X64FunctionCode { None => { return Err(CodegenError { message: "no frame (else)", - }) + }); } }; @@ -1201,7 +1201,7 @@ impl X64FunctionCode { _ => { return Err(CodegenError { message: "unexpected if else state", - }) + }); } } @@ -1219,7 +1219,7 @@ impl X64FunctionCode { None => { return Err(CodegenError { message: "no frame (block end)", - }) + }); } }; @@ -1337,7 +1337,7 @@ impl X64FunctionCode { _ => { return Err(CodegenError { message: "multiple return values is not yet supported", - }) + }); } } @@ -1701,7 +1701,7 @@ impl X64FunctionCode { _ => { return Err(CodegenError { message: "more than 1 function returns are not supported", - }) + }); } } @@ -3062,7 +3062,7 @@ impl FunctionCodeGenerator for X64FunctionCode { None => { return Err(CodegenError { message: "signature not found", - }) + }); } }; let sig = match self.signatures.get(sig_index) { @@ -3070,7 +3070,7 @@ impl FunctionCodeGenerator for X64FunctionCode { None => { return Err(CodegenError { message: "signature does not exist", - }) + }); } }; let param_types: Vec = @@ -3116,7 +3116,7 @@ impl FunctionCodeGenerator for X64FunctionCode { None => { return Err(CodegenError { message: "signature does not exist", - }) + }); } }; let mut param_types: Vec = From e48ff027a66f31e8870d390e4360885f9e463097 Mon Sep 17 00:00:00 2001 From: losfair Date: Mon, 18 Mar 2019 00:52:16 +0800 Subject: [PATCH 077/100] Remove commented out code. 
--- lib/dynasm-backend/src/codegen_x64.rs | 67 ++++++++++----------------- lib/dynasm-backend/src/lib.rs | 2 - 2 files changed, 25 insertions(+), 44 deletions(-) diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index 374a3f97104..786b3043ddb 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -3752,19 +3752,19 @@ impl FunctionCodeGenerator for X64FunctionCode { )?; } /* - 0: 48 85 ff test %rdi,%rdi - 3: 78 0b js 10 - 5: c4 e1 fb 2a c7 vcvtsi2sd %rdi,%xmm0,%xmm0 - a: c3 retq - b: 0f 1f 44 00 00 nopl 0x0(%rax,%rax,1) - 10: 48 89 f8 mov %rdi,%rax - 13: 83 e7 01 and $0x1,%edi - 16: 48 d1 e8 shr %rax - 19: 48 09 f8 or %rdi,%rax - 1c: c4 e1 fb 2a c0 vcvtsi2sd %rax,%xmm0,%xmm0 - 21: c5 fb 58 c0 vaddsd %xmm0,%xmm0,%xmm0 - 25: c3 retq - */ + 0: 48 85 ff test %rdi,%rdi + 3: 78 0b js 10 + 5: c4 e1 fb 2a c7 vcvtsi2sd %rdi,%xmm0,%xmm0 + a: c3 retq + b: 0f 1f 44 00 00 nopl 0x0(%rax,%rax,1) + 10: 48 89 f8 mov %rdi,%rax + 13: 83 e7 01 and $0x1,%edi + 16: 48 d1 e8 shr %rax + 19: 48 09 f8 or %rdi,%rax + 1c: c4 e1 fb 2a c0 vcvtsi2sd %rax,%xmm0,%xmm0 + 21: c5 fb 58 c0 vaddsd %xmm0,%xmm0,%xmm0 + 25: c3 retq + */ Operator::F32ConvertUI64 => { Self::emit_unop( assembler, @@ -4725,18 +4725,18 @@ impl FunctionCodeGenerator for X64FunctionCode { Self::emit_f64_int_conv_check(assembler, reg, -1.0, 18446744073709551616.0); /* - LCPI0_0: - .quad 4890909195324358656 ## double 9.2233720368547758E+18 - - movsd LCPI0_0(%rip), %xmm1 ## xmm1 = mem[0],zero - movapd %xmm0, %xmm2 - subsd %xmm1, %xmm2 - cvttsd2si %xmm2, %rax - movabsq $-9223372036854775808, %rcx ## imm = 0x8000000000000000 - xorq %rax, %rcx - cvttsd2si %xmm0, %rax - ucomisd %xmm1, %xmm0 - cmovaeq %rcx, %rax + LCPI0_0: + .quad 4890909195324358656 ## double 9.2233720368547758E+18 + + movsd LCPI0_0(%rip), %xmm1 ## xmm1 = mem[0],zero + movapd %xmm0, %xmm2 + subsd %xmm1, %xmm2 + cvttsd2si %xmm2, %rax + movabsq $-9223372036854775808, %rcx ## imm = 0x8000000000000000 + xorq %rax, %rcx + cvttsd2si %xmm0, %rax + ucomisd %xmm1, %xmm0 + cmovaeq %rcx, %rax */ dynasm!( @@ -4908,18 +4908,6 @@ unsafe extern "C" fn invoke_import( let vmctx: &mut vm::Ctx = &mut *vmctx; let import = (*vmctx.imported_funcs.offset(import_id as isize)).func; - /*let n_args = (stack_base as usize - stack_top as usize) / 8; - - println!("Calling import: {:?} with vmctx = {:?}, n_args = {}", - import, - vmctx as *mut _, - n_args, - ); - - for i in 0..n_args { - println!("Arg: {:?}", * ((stack_top as usize + i * 8) as *const *const ())); - }*/ - CONSTRUCT_STACK_AND_CALL_NATIVE(stack_top, stack_base, vmctx, import) } @@ -4962,11 +4950,6 @@ unsafe extern "C" fn call_indirect( } }; - /*println!( - "SIG INDEX = {}, FUNC INDEX = {:?}, ELEM INDEX = {}", - sig_index, func_index, elem_index - );*/ - if ctx.signatures[SigIndex::new(sig_index)] != ctx.signatures[ctx.function_signatures[func_index]] { diff --git a/lib/dynasm-backend/src/lib.rs b/lib/dynasm-backend/src/lib.rs index bb492ba09d1..088aa42ce00 100644 --- a/lib/dynasm-backend/src/lib.rs +++ b/lib/dynasm-backend/src/lib.rs @@ -40,7 +40,6 @@ impl CacheGen for Placeholder { &self, _module: &ModuleInner, ) -> Result<(Box, Box<[u8]>, Memory), CacheError> { - // unimplemented!() Err(CacheError::Unknown( "the dynasm backend doesn't support caching yet".to_string(), )) @@ -81,7 +80,6 @@ impl Compiler for SinglePassCompiler { Err(CacheError::Unknown( "the dynasm backend doesn't support caching yet".to_string(), )) - // unimplemented!("the dynasm backend doesn't support 
caching yet") } } From c5694ec527f717e626f433e1fb4b6a14d38f3bd8 Mon Sep 17 00:00:00 2001 From: losfair Date: Mon, 18 Mar 2019 00:53:07 +0800 Subject: [PATCH 078/100] Fix lint errors. --- lib/dynasm-backend/src/codegen_x64.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index 786b3043ddb..09c8059c812 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -342,7 +342,7 @@ impl ProtectedCaller for X64ExecutionContext { _ => { return Err(RuntimeError::Trap { msg: "signature mismatch".into(), - }) + }); } } } else { @@ -352,7 +352,7 @@ impl ProtectedCaller for X64ExecutionContext { _ => { return Err(RuntimeError::Trap { msg: "signature mismatch".into(), - }) + }); } } } From 662a64956597a3c4dc47a4ef5a147c96f3663b24 Mon Sep 17 00:00:00 2001 From: losfair Date: Mon, 18 Mar 2019 01:13:04 +0800 Subject: [PATCH 079/100] Only run dynasm tests on nightly. --- .circleci/config.yml | 3 +++ Makefile | 8 ++++++-- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 1a462b68052..7e4c9e94a8b 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -270,6 +270,9 @@ jobs: - run: | export LLVM_SYS_70_PREFIX="`pwd`/clang+llvm-7.0.0-x86_64-linux-gnu-ubuntu-16.04/" make test + make test-nightly + make test-emscripten + make test-emscripten-nightly - save_cache: paths: - /usr/local/cargo/registry diff --git a/Makefile b/Makefile index a4ecfca3638..b0a442cda47 100644 --- a/Makefile +++ b/Makefile @@ -37,16 +37,20 @@ test: cargo test --all --exclude wasmer-runtime-c-api --exclude wasmer-emscripten --exclude wasmer-spectests -- $(runargs) # cargo test --all --exclude wasmer-emscripten -- --test-threads=1 $(runargs) cargo test --manifest-path lib/spectests/Cargo.toml --features clif - cargo test --manifest-path lib/spectests/Cargo.toml --features dynasm cargo test --manifest-path lib/spectests/Cargo.toml --features llvm cargo build -p wasmer-runtime-c-api cargo test -p wasmer-runtime-c-api -- --nocapture +test-nightly: + cargo test --manifest-path lib/spectests/Cargo.toml --features dynasm + test-emscripten: cargo test --manifest-path lib/emscripten/Cargo.toml --features clif -- --test-threads=1 $(runargs) - cargo test --manifest-path lib/emscripten/Cargo.toml --features dynasm -- --test-threads=1 $(runargs) cargo test --manifest-path lib/emscripten/Cargo.toml --features llvm -- --test-threads=1 $(runargs) +test-emscripten-nightly: + cargo test --manifest-path lib/emscripten/Cargo.toml --features dynasm -- --test-threads=1 $(runargs) + release: # If you are in OS-X, you will need mingw-w64 for cross compiling to windows # brew install mingw-w64 From e1cb4fc10c2708853480626493f988bbc8fbd363 Mon Sep 17 00:00:00 2001 From: Brandon Fish Date: Sun, 17 Mar 2019 18:31:06 -0500 Subject: [PATCH 080/100] Removed submodule cranlift --- cranelift | 1 - 1 file changed, 1 deletion(-) delete mode 160000 cranelift diff --git a/cranelift b/cranelift deleted file mode 160000 index cb62a1ead2c..00000000000 --- a/cranelift +++ /dev/null @@ -1 +0,0 @@ -Subproject commit cb62a1ead2c5346ccb0f1224ecae5939ac064f87 From caa239a3dfe9c1d659e111ab00993d43af87d390 Mon Sep 17 00:00:00 2001 From: losfair Date: Mon, 18 Mar 2019 16:06:20 +0800 Subject: [PATCH 081/100] Make wasmer-dynasm-backend optional. 
--- lib/emscripten/Cargo.toml | 4 ++-- lib/spectests/Cargo.toml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/emscripten/Cargo.toml b/lib/emscripten/Cargo.toml index cfffe82f825..306512dacae 100644 --- a/lib/emscripten/Cargo.toml +++ b/lib/emscripten/Cargo.toml @@ -21,7 +21,7 @@ rand = "0.6" [dev-dependencies] wasmer-clif-backend = { path = "../clif-backend", version = "0.2.0" } wabt = "0.7.2" -wasmer-dynasm-backend = { path = "../dynasm-backend", version = "0.1.0" } +wasmer-dynasm-backend = { path = "../dynasm-backend", version = "0.1.0", optional = true } [target.'cfg(not(windows))'.dev-dependencies] wasmer-llvm-backend = { path = "../llvm-backend", version = "0.1.0" } @@ -32,4 +32,4 @@ glob = "0.2.11" [features] clif = [] llvm = [] -dynasm = [] \ No newline at end of file +dynasm = ["wasmer-dynasm-backend"] \ No newline at end of file diff --git a/lib/spectests/Cargo.toml b/lib/spectests/Cargo.toml index 3318589f7e2..c83dd5ad632 100644 --- a/lib/spectests/Cargo.toml +++ b/lib/spectests/Cargo.toml @@ -16,7 +16,7 @@ wabt = "0.7.2" [dev-dependencies] wasmer-clif-backend = { path = "../clif-backend", version = "0.2.0" } -wasmer-dynasm-backend = { path = "../dynasm-backend", version = "0.1.0" } +wasmer-dynasm-backend = { path = "../dynasm-backend", version = "0.1.0", optional = true } wabt = "0.7.2" [target.'cfg(not(windows))'.dependencies] @@ -27,4 +27,4 @@ default = ["fast-tests"] fast-tests = [] clif = [] llvm = ["wasmer-llvm-backend"] -dynasm = [] \ No newline at end of file +dynasm = ["wasmer-dynasm-backend"] \ No newline at end of file From b94c04649a8f9e2fa49d9fccbd40a4e2ee567215 Mon Sep 17 00:00:00 2001 From: losfair Date: Mon, 18 Mar 2019 16:08:43 +0800 Subject: [PATCH 082/100] Remove println. --- src/webassembly.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/webassembly.rs b/src/webassembly.rs index e448ce5c6f1..8ae38935b99 100644 --- a/src/webassembly.rs +++ b/src/webassembly.rs @@ -91,7 +91,7 @@ pub fn run_instance( .into_iter() .map(|x| Value::I32(x.parse().unwrap())) .collect(); - println!("{:?}", instance.call("main", &args)?); + instance.call("main", &args)?; }; Ok(()) From af8f307a92fdd916f6955f95534e8cb760d60885 Mon Sep 17 00:00:00 2001 From: losfair Date: Mon, 18 Mar 2019 16:14:11 +0800 Subject: [PATCH 083/100] Fix dependencies. 
--- Cargo.toml | 4 ++-- lib/emscripten/Cargo.toml | 10 ++++------ lib/spectests/Cargo.toml | 8 +++----- 3 files changed, 9 insertions(+), 13 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 7a928a24efa..e0d9a91d26e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,9 +25,8 @@ wasmer-clif-backend = { path = "lib/clif-backend" } wasmer-runtime = { path = "lib/runtime" } wasmer-runtime-core = { path = "lib/runtime-core" } wasmer-emscripten = { path = "lib/emscripten" } - -[target.'cfg(not(windows))'.dependencies] wasmer-llvm-backend = { path = "lib/llvm-backend", optional = true } +wasmer-dynasm-backend = { path = "lib/dynasm-backend", optional = true } [workspace] members = ["lib/clif-backend", "lib/dynasm-backend", "lib/runtime", "lib/runtime-core", "lib/emscripten", "lib/spectests", "lib/win-exception-handler", "lib/runtime-c-api", "lib/llvm-backend"] @@ -42,3 +41,4 @@ default = ["fast-tests"] # This feature will allow cargo test to run much faster fast-tests = [] llvm = ["wasmer-llvm-backend"] +dynasm = ["wasmer-dynasm-backend"] \ No newline at end of file diff --git a/lib/emscripten/Cargo.toml b/lib/emscripten/Cargo.toml index 306512dacae..2823b1c4f3e 100644 --- a/lib/emscripten/Cargo.toml +++ b/lib/emscripten/Cargo.toml @@ -14,22 +14,20 @@ lazy_static = "1.2.0" libc = "0.2.49" byteorder = "1" time = "0.1.41" +wasmer-clif-backend = { path = "../clif-backend", version = "0.2.0" } +wasmer-dynasm-backend = { path = "../dynasm-backend", version = "0.1.0", optional = true } +wasmer-llvm-backend = { path = "../llvm-backend", version = "0.1.0", optional = true } [target.'cfg(windows)'.dependencies] rand = "0.6" [dev-dependencies] -wasmer-clif-backend = { path = "../clif-backend", version = "0.2.0" } wabt = "0.7.2" -wasmer-dynasm-backend = { path = "../dynasm-backend", version = "0.1.0", optional = true } - -[target.'cfg(not(windows))'.dev-dependencies] -wasmer-llvm-backend = { path = "../llvm-backend", version = "0.1.0" } [build-dependencies] glob = "0.2.11" [features] clif = [] -llvm = [] +llvm = ["wasmer-llvm-backend"] dynasm = ["wasmer-dynasm-backend"] \ No newline at end of file diff --git a/lib/spectests/Cargo.toml b/lib/spectests/Cargo.toml index c83dd5ad632..8dd8f83f492 100644 --- a/lib/spectests/Cargo.toml +++ b/lib/spectests/Cargo.toml @@ -10,18 +10,16 @@ build = "build/mod.rs" [dependencies] wasmer-runtime-core = { path = "../runtime-core", version = "0.2.0" } +wasmer-clif-backend = { path = "../clif-backend", version = "0.2.0" } +wasmer-llvm-backend = { path = "../llvm-backend", version = "0.1.0", optional = true } +wasmer-dynasm-backend = { path = "../dynasm-backend", version = "0.1.0", optional = true } [build-dependencies] wabt = "0.7.2" [dev-dependencies] -wasmer-clif-backend = { path = "../clif-backend", version = "0.2.0" } -wasmer-dynasm-backend = { path = "../dynasm-backend", version = "0.1.0", optional = true } wabt = "0.7.2" -[target.'cfg(not(windows))'.dependencies] -wasmer-llvm-backend = { path = "../llvm-backend", version = "0.1.0", optional = true } - [features] default = ["fast-tests"] fast-tests = [] From cd5c1456d792dbb85f08833e692ec5f3dd521610 Mon Sep 17 00:00:00 2001 From: losfair Date: Mon, 18 Mar 2019 16:15:48 +0800 Subject: [PATCH 084/100] Fix default compiler. 
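The placeholder from earlier made the single-pass (dynasm) backend the default
whenever `llvm` was off. Restore Cranelift as the fallback: LLVM or the
single-pass backend becomes `DefaultCompiler` only when its feature is enabled,
and the two backend features are treated as mutually exclusive here (enabling
both would import `DefaultCompiler` twice and fail to build). A sketch of how a
compiler, default or explicit, is consumed; `compile_with` is assumed from
wasmer-runtime-core's public API and is not part of this diff:

    use wasmer_runtime_core::{backend::Compiler, error::CompileResult};

    /// Compiles a wasm binary with whichever backend the caller hands in,
    /// e.g. the feature-selected default returned by `default_compiler()`.
    fn check_compiles(wasm: &[u8], compiler: &dyn Compiler) -> CompileResult<()> {
        wasmer_runtime_core::compile_with(wasm, compiler).map(|_| ())
    }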
--- lib/runtime/src/lib.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/runtime/src/lib.rs b/lib/runtime/src/lib.rs index 13557d38378..55efcddec72 100644 --- a/lib/runtime/src/lib.rs +++ b/lib/runtime/src/lib.rs @@ -158,9 +158,11 @@ fn default_compiler() -> &'static dyn Compiler { #[cfg(feature = "llvm")] use wasmer_llvm_backend::LLVMCompiler as DefaultCompiler; - #[cfg(not(feature = "llvm"))] + #[cfg(feature = "dynasm")] use wasmer_dynasm_backend::SinglePassCompiler as DefaultCompiler; - // use wasmer_clif_backend::CraneliftCompiler as DefaultCompiler; // TODO Fix default + + #[cfg(not(any(feature = "llvm", feature = "dynasm")))] + use wasmer_clif_backend::CraneliftCompiler as DefaultCompiler; lazy_static! { static ref DEFAULT_COMPILER: DefaultCompiler = { DefaultCompiler::new() }; From b06a49e1c0929cee61c87d9dfce580c2cf4f593f Mon Sep 17 00:00:00 2001 From: losfair Date: Mon, 18 Mar 2019 16:22:57 +0800 Subject: [PATCH 085/100] Move wasmer-dynasm-backend out of default-compiler. --- lib/runtime/Cargo.toml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/runtime/Cargo.toml b/lib/runtime/Cargo.toml index 8f2c3c2ff7c..c137922f063 100644 --- a/lib/runtime/Cargo.toml +++ b/lib/runtime/Cargo.toml @@ -33,10 +33,11 @@ optional = true [features] default = ["default-compiler"] -default-compiler = ["wasmer-clif-backend", "wasmer-dynasm-backend"] +default-compiler = ["wasmer-clif-backend"] cache = ["default-compiler"] debug = ["wasmer-clif-backend/debug", "wasmer-runtime-core/debug"] llvm = ["wasmer-llvm-backend"] +dynasm = ["wasmer-dynasm-backend"] [[bench]] name = "nginx" From eb606a6d70234fbaeb0d326df4b62da1a5c456d7 Mon Sep 17 00:00:00 2001 From: losfair Date: Mon, 18 Mar 2019 16:42:30 +0800 Subject: [PATCH 086/100] Disable clippy temporarily. --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index b0a442cda47..8d8fe66c690 100644 --- a/Makefile +++ b/Makefile @@ -28,7 +28,7 @@ integration-tests: release lint: cargo fmt --all -- --check - cargo clippy --all + # cargo clippy --all precommit: lint test From af24cfc8c4880a17354473a08fecf8ae3242fafb Mon Sep 17 00:00:00 2001 From: losfair Date: Mon, 18 Mar 2019 17:08:55 +0800 Subject: [PATCH 087/100] Exclude dynasm backend in cargo test. --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 8d8fe66c690..46eae3a1982 100644 --- a/Makefile +++ b/Makefile @@ -34,7 +34,7 @@ precommit: lint test test: # We use one thread so the emscripten stdouts doesn't collide - cargo test --all --exclude wasmer-runtime-c-api --exclude wasmer-emscripten --exclude wasmer-spectests -- $(runargs) + cargo test --all --exclude wasmer-runtime-c-api --exclude wasmer-emscripten --exclude wasmer-spectests --exclude wasmer-dynasm-backend -- $(runargs) # cargo test --all --exclude wasmer-emscripten -- --test-threads=1 $(runargs) cargo test --manifest-path lib/spectests/Cargo.toml --features clif cargo test --manifest-path lib/spectests/Cargo.toml --features llvm From 2ab2205d6b50772d1f6c1917807f297ec25b668b Mon Sep 17 00:00:00 2001 From: losfair Date: Tue, 19 Mar 2019 11:47:38 +0800 Subject: [PATCH 088/100] Allow cross-module indirect calls. 
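Previously the single-pass backend resolved `call_indirect` targets through the
caller's own module metadata (`func_index`, `function_pointers`,
`function_signatures`) and invoked them with the caller's vmctx and memory, so
indirect calls could only reach functions defined in the same module. Table
entries now carry a complete `vm::Anyfunc` (function pointer, the callee's own
ctx, and a runtime-global signature id), the `func_index` field is dropped from
`Anyfunc`, and signature checking goes through `dynamic_sigindices`, which is
made public for the backend. A condensed sketch of the new check, mirroring the
code added to `call_indirect` (the helper itself is illustrative):

    use wasmer_runtime_core::vm;

    /// Returns true when a table entry is callable with the given
    /// module-local signature index (simplified from call_indirect).
    unsafe fn entry_matches(anyfunc: &vm::Anyfunc, vmctx: *mut vm::Ctx, sig_index: usize) -> bool {
        let dynamic_sigindex = *(*vmctx).dynamic_sigindices.offset(sig_index as isize);
        !anyfunc.func.is_null() && anyfunc.sig_id.0 == dynamic_sigindex.0
    }

The call itself then goes through `anyfunc.func` with `anyfunc.ctx`, not the
caller's context, so the callee sees its own memories and globals.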
--- lib/dynasm-backend/src/codegen_x64.rs | 67 ++++++++++++++++----------- lib/dynasm-backend/src/lib.rs | 10 ---- lib/runtime-core/src/backing.rs | 2 - lib/runtime-core/src/table/anyfunc.rs | 2 - lib/runtime-core/src/vm.rs | 4 +- 5 files changed, 40 insertions(+), 45 deletions(-) diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index 09c8059c812..1e45fcde553 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -83,7 +83,9 @@ lazy_static! { ; push r14 ; push r13 ; push r12 - ; sub rsp, 8 // align to 16 bytes + ; push r11 + ; push rbp + ; mov rbp, rsp ; mov r15, rdi ; mov r14, rsi @@ -95,34 +97,56 @@ lazy_static! { ; sub r14, 8 ; cmp r14, r15 ; jb >stack_ready - ; mov rsi, [r14] + ; mov rsi, [r14] ; sub r14, 8 ; cmp r14, r15 ; jb >stack_ready - ; mov rdx, [r14] + ; mov rdx, [r14] ; sub r14, 8 ; cmp r14, r15 ; jb >stack_ready - ; mov rcx, [r14] + ; mov rcx, [r14] ; sub r14, 8 ; cmp r14, r15 ; jb >stack_ready - ; mov r8, [r14] + ; mov r8, [r14] ; sub r14, 8 ; cmp r14, r15 ; jb >stack_ready + ; mov r9, [r14] + ; sub r14, 8 + ; cmp r14, r15 + ; jb >stack_ready - ; ud2 // FIXME + ; mov rax, r14 + ; sub rax, r15 + ; sub rsp, rax + ; sub rsp, 8 + ; mov rax, QWORD 0xfffffffffffffff0u64 as i64 + ; and rsp, rax + ; mov rax, rsp + ; loop_begin: + ; mov r11, [r14] + ; mov [rax], r11 + ; sub r14, 8 + ; add rax, 8 + ; cmp r14, r15 + ; jb >stack_ready + ; jmp x, - None => { - eprintln!("empty table entry"); - protect_unix::trigger_trap(); - } - }; + let dynamic_sigindex = *(*vmctx).dynamic_sigindices.offset(sig_index as isize); + + if anyfunc.func.is_null() { + eprintln!("null anyfunc"); + protect_unix::trigger_trap(); + } - if ctx.signatures[SigIndex::new(sig_index)] - != ctx.signatures[ctx.function_signatures[func_index]] - { + if anyfunc.sig_id.0 != dynamic_sigindex.0 { eprintln!("signature mismatch"); protect_unix::trigger_trap(); } - let func = ctx.function_pointers[func_index.index() as usize].0; - CALL_WASM( - stack_top, - stack_base as usize - stack_top as usize, - func as _, - memory_base, - vmctx, - ) as u64 + CONSTRUCT_STACK_AND_CALL_NATIVE(stack_top, stack_base, anyfunc.ctx, anyfunc.func) } #[repr(u64)] diff --git a/lib/dynasm-backend/src/lib.rs b/lib/dynasm-backend/src/lib.rs index 088aa42ce00..5eb91f8e5fb 100644 --- a/lib/dynasm-backend/src/lib.rs +++ b/lib/dynasm-backend/src/lib.rs @@ -46,16 +46,6 @@ impl CacheGen for Placeholder { } } -impl FuncResolver for Placeholder { - fn get( - &self, - _module: &ModuleInner, - _local_func_index: LocalFuncIndex, - ) -> Option> { - NonNull::new(0x3f3f3f3f3f3f3f3fusize as *mut vm::Func) - } -} - pub struct SinglePassCompiler {} impl SinglePassCompiler { pub fn new() -> Self { diff --git a/lib/runtime-core/src/backing.rs b/lib/runtime-core/src/backing.rs index 2c9630764e4..856d6770e5b 100644 --- a/lib/runtime-core/src/backing.rs +++ b/lib/runtime-core/src/backing.rs @@ -234,7 +234,6 @@ impl LocalBacking { func, ctx, sig_id, - func_index: Some(func_index), }; } }); @@ -278,7 +277,6 @@ impl LocalBacking { func, ctx, sig_id, - func_index: Some(func_index), }; } }); diff --git a/lib/runtime-core/src/table/anyfunc.rs b/lib/runtime-core/src/table/anyfunc.rs index 8312e2506e8..789f67ada2b 100644 --- a/lib/runtime-core/src/table/anyfunc.rs +++ b/lib/runtime-core/src/table/anyfunc.rs @@ -107,7 +107,6 @@ impl AnyfuncTable { func: ptr, ctx: ptr::null_mut(), sig_id, - func_index: None, } } AnyfuncInner::Managed(ref func) => { @@ -118,7 +117,6 @@ impl AnyfuncTable { func: 
func.raw(), ctx: func.instance_inner.vmctx, sig_id, - func_index: Some(func.func_index), } } }; diff --git a/lib/runtime-core/src/vm.rs b/lib/runtime-core/src/vm.rs index b668833cd55..6e2a48aa1cd 100644 --- a/lib/runtime-core/src/vm.rs +++ b/lib/runtime-core/src/vm.rs @@ -38,7 +38,7 @@ pub struct Ctx { /// from a static, module-local signature id to a runtime-global /// signature id. This is used to allow call-indirect to other /// modules safely. - pub(crate) dynamic_sigindices: *const SigId, + pub dynamic_sigindices: *const SigId, pub(crate) local_functions: *const *const Func, @@ -302,7 +302,6 @@ pub struct Anyfunc { pub func: *const Func, pub ctx: *mut Ctx, pub sig_id: SigId, - pub func_index: Option, } impl Anyfunc { @@ -311,7 +310,6 @@ impl Anyfunc { func: ptr::null(), ctx: ptr::null_mut(), sig_id: SigId(u32::max_value()), - func_index: None, } } From ebaf2dc5a896ad7fdae13c344c136885760a3939 Mon Sep 17 00:00:00 2001 From: losfair Date: Tue, 19 Mar 2019 11:52:27 +0800 Subject: [PATCH 089/100] Make DynFunc::func_index private. --- lib/runtime-core/src/instance.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/runtime-core/src/instance.rs b/lib/runtime-core/src/instance.rs index e069c381b73..3dee6ee5062 100644 --- a/lib/runtime-core/src/instance.rs +++ b/lib/runtime-core/src/instance.rs @@ -434,7 +434,7 @@ pub struct DynFunc<'a> { pub(crate) signature: Arc, module: &'a ModuleInner, pub(crate) instance_inner: &'a InstanceInner, - pub func_index: FuncIndex, + func_index: FuncIndex, } impl<'a> DynFunc<'a> { From 61abe70042f61275542b05818b94fff3f2ee71ea Mon Sep 17 00:00:00 2001 From: losfair Date: Tue, 19 Mar 2019 11:52:43 +0800 Subject: [PATCH 090/100] cargo fmt --- lib/runtime-core/src/backing.rs | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/lib/runtime-core/src/backing.rs b/lib/runtime-core/src/backing.rs index 856d6770e5b..48f2d225678 100644 --- a/lib/runtime-core/src/backing.rs +++ b/lib/runtime-core/src/backing.rs @@ -230,11 +230,7 @@ impl LocalBacking { } }; - elements[init_base + i] = vm::Anyfunc { - func, - ctx, - sig_id, - }; + elements[init_base + i] = vm::Anyfunc { func, ctx, sig_id }; } }); } @@ -273,11 +269,7 @@ impl LocalBacking { } }; - elements[init_base + i] = vm::Anyfunc { - func, - ctx, - sig_id, - }; + elements[init_base + i] = vm::Anyfunc { func, ctx, sig_id }; } }); } From a006a368c5507908aca46e6db40b2f0f0354a7d4 Mon Sep 17 00:00:00 2001 From: losfair Date: Tue, 19 Mar 2019 20:01:38 +0800 Subject: [PATCH 091/100] Cleanup. 
--- lib/runtime-core/src/error.rs | 9 --------- lib/runtime-core/src/types.rs | 15 --------------- lib/runtime-core/src/vm.rs | 2 +- 3 files changed, 1 insertion(+), 25 deletions(-) diff --git a/lib/runtime-core/src/error.rs b/lib/runtime-core/src/error.rs index ecb5c020768..61d5ac3a227 100644 --- a/lib/runtime-core/src/error.rs +++ b/lib/runtime-core/src/error.rs @@ -5,7 +5,6 @@ use crate::types::{ use core::borrow::Borrow; use std::any::Any; use std::sync::Arc; -use wasmparser::BinaryReaderError; pub type Result = std::result::Result; pub type CompileResult = std::result::Result; @@ -25,14 +24,6 @@ pub enum CompileError { InternalError { msg: String }, } -impl From for CompileError { - fn from(other: BinaryReaderError) -> CompileError { - CompileError::InternalError { - msg: format!("{:?}", other), - } - } -} - impl PartialEq for CompileError { fn eq(&self, _other: &CompileError) -> bool { false diff --git a/lib/runtime-core/src/types.rs b/lib/runtime-core/src/types.rs index fb30c6276f1..b84f29683aa 100644 --- a/lib/runtime-core/src/types.rs +++ b/lib/runtime-core/src/types.rs @@ -18,21 +18,6 @@ pub enum Type { F64, } -impl Type { - pub fn from_wasmparser_type(other: ::wasmparser::Type) -> CompileResult { - use wasmparser::Type as WPType; - match other { - WPType::I32 => Ok(Type::I32), - WPType::I64 => Ok(Type::I64), - WPType::F32 => Ok(Type::F32), - WPType::F64 => Ok(Type::F64), - _ => Err(CompileError::ValidationError { - msg: "type cannot be converted into a core type".into(), - }), - } - } -} - impl std::fmt::Display for Type { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!(f, "{:?}", self) diff --git a/lib/runtime-core/src/vm.rs b/lib/runtime-core/src/vm.rs index 6e2a48aa1cd..6a5231da4ad 100644 --- a/lib/runtime-core/src/vm.rs +++ b/lib/runtime-core/src/vm.rs @@ -3,7 +3,7 @@ use crate::{ memory::Memory, module::ModuleInner, structures::TypedIndex, - types::{FuncIndex, LocalOrImport, MemoryIndex}, + types::{LocalOrImport, MemoryIndex}, }; use std::{ffi::c_void, mem, ptr}; From 14da8abc23418d1e97f060076a2841b9133c12d5 Mon Sep 17 00:00:00 2001 From: losfair Date: Tue, 19 Mar 2019 21:27:49 +0800 Subject: [PATCH 092/100] Run-time memory bound checking. --- lib/dynasm-backend/src/codegen_x64.rs | 230 ++++++++++++++++++++++---- lib/runtime-core/src/types.rs | 4 +- 2 files changed, 198 insertions(+), 36 deletions(-) diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index 1e45fcde553..4286be9d5c3 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -30,7 +30,14 @@ thread_local! { } lazy_static! { - static ref CALL_WASM: unsafe extern "C" fn(params: *const u8, params_len: usize, target: *const u8, memory_base: *mut u8, vmctx: *mut vm::Ctx) -> i64 = { + static ref CALL_WASM: unsafe extern "C" fn( + params: *const u8, + params_len: usize, + target: *const u8, + memory_base: *mut u8, + memory_size_pages: usize, + vmctx: *mut vm::Ctx + ) -> i64 = { let mut assembler = Assembler::new().unwrap(); let offset = assembler.offset(); dynasm!( @@ -40,8 +47,16 @@ lazy_static! { ; push r13 ; push r14 ; push r15 + ; mov r15, rcx // memory_base - ; mov r14, r8 // vmctx + + // Use the upper 16 bits of r15 to store memory size (in pages). This can support memory size up to 4GB. + // Wasmer currently only runs in usermode so here we assume the upper 17 bits of memory base address are all zero. + // FIXME: Change this if want to use this backend in kernel mode. 
+ ; shl r8, 48 + ; or r15, r8 + + ; mov r14, r9 // vmctx ; lea rax, [>after_call] ; push rax ; push rbp @@ -196,7 +211,7 @@ impl Register { 7 => R11, 8 => RBX, 9 => R12, - 10 => R13, + // 10 => R13, // R13 is reserved as temporary register. // 11 => R14, // R14 is reserved for vmctx. // 12 => R15, // R15 is reserved for memory base pointer. _ => unreachable!(), @@ -382,22 +397,22 @@ impl ProtectedCaller for X64ExecutionContext { } } - let memory_base: *mut u8 = if _module.info.memories.len() > 0 { + let (memory_base, memory_size): (*mut u8, usize) = if _module.info.memories.len() > 0 { if _module.info.memories.len() != 1 || _module.info.imported_memories.len() != 0 { return Err(RuntimeError::Trap { msg: "only one linear memory is supported".into(), }); } - unsafe { (**(*_vmctx).memories).base } + unsafe { ((**(*_vmctx).memories).base, (**(*_vmctx).memories).bound) } } else if _module.info.imported_memories.len() > 0 { if _module.info.memories.len() != 0 || _module.info.imported_memories.len() != 1 { return Err(RuntimeError::Trap { msg: "only one linear memory is supported".into(), }); } - unsafe { (**(*_vmctx).imported_memories).base } + unsafe { ((**(*_vmctx).imported_memories).base, (**(*_vmctx).imported_memories).bound) } } else { - ::std::ptr::null_mut() + (::std::ptr::null_mut(), 0) }; //println!("MEMORY = {:?}", memory_base); @@ -410,6 +425,7 @@ impl ProtectedCaller for X64ExecutionContext { param_buf.len(), ptr, memory_base, + memory_size.wrapping_shr(16), _vmctx, ) }) @@ -545,22 +561,6 @@ impl ModuleCodeGenerator Result<(), CodegenError> { - for mem in module_info - .memories - .iter() - .map(|(_, v)| v) - .chain(module_info.imported_memories.iter().map(|(_, v)| &v.1)) - { - match mem.memory_type() { - MemoryType::Dynamic => { - return Err(CodegenError { - message: "dynamic memory isn't supported yet", - }); - } - _ => {} - } - } - Ok(()) } @@ -1375,6 +1375,46 @@ impl X64FunctionCode { Ok(()) } + fn emit_update_memory_from_ctx( + assembler: &mut Assembler, + info: &ModuleInfo, + ) -> Result<(), CodegenError> { + if info.memories.len() > 0 { + if info.memories.len() != 1 || info.imported_memories.len() != 0 { + return Err(CodegenError { + message: "only one linear memory is supported", + }); + } + dynasm!( + assembler + ; mov r15, r14 => vm::Ctx.memories + ); + } else if info.imported_memories.len() > 0 { + if info.memories.len() != 0 || info.imported_memories.len() != 1 { + return Err(CodegenError { + message: "only one linear memory is supported", + }); + } + dynasm!( + assembler + ; mov r15, r14 => vm::Ctx.imported_memories + ); + } else { + return Ok(()); + }; + + dynasm!( + assembler + ; mov r15, [r15] + ; mov r13, r15 => LocalMemory.bound + ; shr r13, 16 // 65536 bytes per page + ; shl r13, 48 + ; mov r15, r15 => LocalMemory.base + ; or r15, r13 + ); + Ok(()) + } + fn emit_managed_call_trampoline( assembler: &mut Assembler, info: &ModuleInfo, @@ -1416,7 +1456,7 @@ impl X64FunctionCode { dynasm!( assembler - ; mov r8, rdi // vmctx + ; mov r9, rdi // vmctx ; mov rdx, QWORD target.0 as usize as i64 ; mov rsi, QWORD (num_params * 8) as i64 ; mov rdi, rsp @@ -1430,7 +1470,7 @@ impl X64FunctionCode { } dynasm!( assembler - ; mov rcx, r8 => vm::Ctx.memories + ; mov rcx, r9 => vm::Ctx.memories ); true } else if info.imported_memories.len() > 0 { @@ -1441,7 +1481,7 @@ impl X64FunctionCode { } dynasm!( assembler - ; mov rcx, r8 => vm::Ctx.imported_memories + ; mov rcx, r9 => vm::Ctx.imported_memories ); true } else { @@ -1452,6 +1492,8 @@ impl X64FunctionCode { dynasm!( assembler ; mov 
rcx, [rcx] + ; mov r8, rcx => LocalMemory.bound + ; shr r8, 16 // 65536 bytes per page ; mov rcx, rcx => LocalMemory.base ); } else { @@ -1732,11 +1774,48 @@ impl X64FunctionCode { Ok(()) } + fn emit_memory_bound_check_if_needed( + assembler: &mut Assembler, + module_info: &ModuleInfo, + offset_reg: Register, + value_size: usize, + ) { + let mem_desc = match MemoryIndex::new(0).local_or_import(module_info) { + LocalOrImport::Local(local_mem_index) => &module_info.memories[local_mem_index], + LocalOrImport::Import(import_mem_index) => &module_info.imported_memories[import_mem_index].1, + }; + let need_check = match mem_desc.memory_type() { + MemoryType::Dynamic => true, + MemoryType::Static | MemoryType::SharedStatic => false, + }; + if need_check || true { + dynasm!( + assembler + ; movq xmm5, r14 + ; lea r14, [Rq(offset_reg as u8) + value_size as i32] // overflow isn't possible since offset_reg contains a 32-bit value. + + ; mov r13, r15 + ; shr r13, 48 + ; shl r13, 16 + ; cmp r14, r13 + ; ja >out_of_bounds + ; jmp >ok + + ; out_of_bounds: + ; ud2 + ; ok: + ; movq r14, xmm5 + ); + } + } + fn emit_memory_load( assembler: &mut Assembler, value_stack: &mut ValueStack, f: F, out_ty: WpType, + module_info: &ModuleInfo, + read_size: usize, ) -> Result<(), CodegenError> { let addr_info = value_stack.pop()?; let out_loc = value_stack.push(out_ty); @@ -1752,9 +1831,16 @@ impl X64FunctionCode { match addr_info.location { ValueLocation::Register(x) => { let reg = Register::from_scratch_reg(x); + dynasm!( + assembler + ; mov Rd(reg as u8), Rd(reg as u8) + ); + Self::emit_memory_bound_check_if_needed(assembler, module_info, reg, read_size); dynasm!( assembler ; add Rq(reg as u8), r15 + ; shl Rq(reg as u8), 16 + ; shr Rq(reg as u8), 16 ); f(assembler, reg); } @@ -1762,7 +1848,14 @@ impl X64FunctionCode { dynasm!( assembler ; pop rax + ; mov eax, eax + ); + Self::emit_memory_bound_check_if_needed(assembler, module_info, Register::RAX, read_size); + dynasm!( + assembler ; add rax, r15 + ; shl rax, 16 + ; shr rax, 16 ); f(assembler, Register::RAX); dynasm!( @@ -1779,6 +1872,8 @@ impl X64FunctionCode { value_stack: &mut ValueStack, f: F, value_ty: WpType, + module_info: &ModuleInfo, + write_size: usize, ) -> Result<(), CodegenError> { let value_info = value_stack.pop()?; let addr_info = value_stack.pop()?; @@ -1800,9 +1895,16 @@ impl X64FunctionCode { let value_reg = Register::from_scratch_reg(x); let addr_reg = Register::from_scratch_reg(addr_info.location.get_register().unwrap()); // must be a register + dynasm!( + assembler + ; mov Rd(addr_reg as u8), Rd(addr_reg as u8) + ); + Self::emit_memory_bound_check_if_needed(assembler, module_info, addr_reg, write_size); dynasm!( assembler ; add Rq(addr_reg as u8), r15 + ; shl Rq(addr_reg as u8), 16 + ; shr Rq(addr_reg as u8), 16 ); f(assembler, addr_reg, value_reg); } @@ -1810,9 +1912,16 @@ impl X64FunctionCode { match addr_info.location { ValueLocation::Register(x) => { let addr_reg = Register::from_scratch_reg(x); + dynasm!( + assembler + ; mov Rd(addr_reg as u8), Rd(addr_reg as u8) + ); + Self::emit_memory_bound_check_if_needed(assembler, module_info, addr_reg, write_size); dynasm!( assembler ; add Rq(addr_reg as u8), r15 + ; shl Rq(addr_reg as u8), 16 + ; shr Rq(addr_reg as u8), 16 ; pop rax ); f(assembler, addr_reg, Register::RAX); @@ -1823,7 +1932,17 @@ impl X64FunctionCode { ; mov [rsp - 8], rcx // red zone ; pop rax // value ; pop rcx // address + ); + dynasm!( + assembler + ; mov ecx, ecx + ); + Self::emit_memory_bound_check_if_needed(assembler, 
module_info, Register::RCX, write_size); + dynasm!( + assembler ; add rcx, r15 + ; shl rcx, 16 + ; shr rcx, 16 ); f(assembler, Register::RCX, Register::RAX); dynasm!( @@ -3392,6 +3511,8 @@ impl FunctionCodeGenerator for X64FunctionCode { ); }, WpType::I32, + module_info, + 4 )?; } Operator::I32Load8U { memarg } => { @@ -3405,6 +3526,8 @@ impl FunctionCodeGenerator for X64FunctionCode { ); }, WpType::I32, + module_info, + 1 )?; } Operator::I32Load8S { memarg } => { @@ -3418,6 +3541,8 @@ impl FunctionCodeGenerator for X64FunctionCode { ); }, WpType::I32, + module_info, + 1 )?; } Operator::I32Load16U { memarg } => { @@ -3431,6 +3556,8 @@ impl FunctionCodeGenerator for X64FunctionCode { ); }, WpType::I32, + module_info, + 2 )?; } Operator::I32Load16S { memarg } => { @@ -3444,6 +3571,8 @@ impl FunctionCodeGenerator for X64FunctionCode { ); }, WpType::I32, + module_info, + 2 )?; } Operator::I32Store { memarg } => { @@ -3457,6 +3586,8 @@ impl FunctionCodeGenerator for X64FunctionCode { ); }, WpType::I32, + module_info, + 4 )?; } Operator::I32Store8 { memarg } => { @@ -3470,6 +3601,8 @@ impl FunctionCodeGenerator for X64FunctionCode { ); }, WpType::I32, + module_info, + 1 )?; } Operator::I32Store16 { memarg } => { @@ -3483,6 +3616,8 @@ impl FunctionCodeGenerator for X64FunctionCode { ); }, WpType::I32, + module_info, + 2 )?; } Operator::I64Load { memarg } => { @@ -3496,6 +3631,8 @@ impl FunctionCodeGenerator for X64FunctionCode { ); }, WpType::I64, + module_info, + 8 )?; } Operator::I64Load8U { memarg } => { @@ -3509,6 +3646,8 @@ impl FunctionCodeGenerator for X64FunctionCode { ); }, WpType::I64, + module_info, + 1 )?; } Operator::I64Load8S { memarg } => { @@ -3522,6 +3661,8 @@ impl FunctionCodeGenerator for X64FunctionCode { ); }, WpType::I64, + module_info, + 1 )?; } Operator::I64Load16U { memarg } => { @@ -3535,6 +3676,8 @@ impl FunctionCodeGenerator for X64FunctionCode { ); }, WpType::I64, + module_info, + 2 )?; } Operator::I64Load16S { memarg } => { @@ -3548,6 +3691,8 @@ impl FunctionCodeGenerator for X64FunctionCode { ); }, WpType::I64, + module_info, + 2 )?; } Operator::I64Load32U { memarg } => { @@ -3561,6 +3706,8 @@ impl FunctionCodeGenerator for X64FunctionCode { ); }, WpType::I64, + module_info, + 4 )?; } Operator::I64Load32S { memarg } => { @@ -3574,6 +3721,8 @@ impl FunctionCodeGenerator for X64FunctionCode { ); }, WpType::I64, + module_info, + 4 )?; } Operator::I64Store { memarg } => { @@ -3587,6 +3736,8 @@ impl FunctionCodeGenerator for X64FunctionCode { ); }, WpType::I64, + module_info, + 8 )?; } Operator::I64Store8 { memarg } => { @@ -3600,6 +3751,8 @@ impl FunctionCodeGenerator for X64FunctionCode { ); }, WpType::I64, + module_info, + 1 )?; } Operator::I64Store16 { memarg } => { @@ -3613,6 +3766,8 @@ impl FunctionCodeGenerator for X64FunctionCode { ); }, WpType::I64, + module_info, + 2 )?; } Operator::I64Store32 { memarg } => { @@ -3626,6 +3781,8 @@ impl FunctionCodeGenerator for X64FunctionCode { ); }, WpType::I64, + module_info, + 4 )?; } Operator::F32Const { value } => { @@ -3676,6 +3833,8 @@ impl FunctionCodeGenerator for X64FunctionCode { ); }, WpType::F32, + module_info, + 4 )?; } Operator::F32Store { memarg } => { @@ -3689,6 +3848,8 @@ impl FunctionCodeGenerator for X64FunctionCode { ); }, WpType::F32, + module_info, + 4 )?; } Operator::F64Load { memarg } => { @@ -3702,6 +3863,8 @@ impl FunctionCodeGenerator for X64FunctionCode { ); }, WpType::F64, + module_info, + 8 )?; } Operator::F64Store { memarg } => { @@ -3715,6 +3878,8 @@ impl FunctionCodeGenerator for 
X64FunctionCode { ); }, WpType::F64, + module_info, + 8 )?; } Operator::I32ReinterpretF32 => { @@ -4813,8 +4978,7 @@ impl FunctionCodeGenerator for X64FunctionCode { LocalOrImport::Local(local_mem_index) => { let mem_desc = &module_info.memories[local_mem_index]; match mem_desc.memory_type() { - //MemoryType::Dynamic => self.native_trampolines.memory_size_dynamic_local, - MemoryType::Dynamic => unimplemented!(), + MemoryType::Dynamic => self.native_trampolines.memory_size_dynamic_local, MemoryType::Static => self.native_trampolines.memory_size_static_local, MemoryType::SharedStatic => { self.native_trampolines.memory_size_shared_local @@ -4824,8 +4988,7 @@ impl FunctionCodeGenerator for X64FunctionCode { LocalOrImport::Import(import_mem_index) => { let mem_desc = &module_info.imported_memories[import_mem_index].1; match mem_desc.memory_type() { - //MemoryType::Dynamic => self.native_trampolines.memory_size_dynamic_import, - MemoryType::Dynamic => unimplemented!(), + MemoryType::Dynamic => self.native_trampolines.memory_size_dynamic_import, MemoryType::Static => self.native_trampolines.memory_size_static_import, MemoryType::SharedStatic => { self.native_trampolines.memory_size_shared_import @@ -4841,8 +5004,7 @@ impl FunctionCodeGenerator for X64FunctionCode { LocalOrImport::Local(local_mem_index) => { let mem_desc = &module_info.memories[local_mem_index]; match mem_desc.memory_type() { - //MemoryType::Dynamic => self.native_trampolines.memory_grow_dynamic_local, - MemoryType::Dynamic => unimplemented!(), + MemoryType::Dynamic => self.native_trampolines.memory_grow_dynamic_local, MemoryType::Static => self.native_trampolines.memory_grow_static_local, MemoryType::SharedStatic => { self.native_trampolines.memory_grow_shared_local @@ -4852,8 +5014,7 @@ impl FunctionCodeGenerator for X64FunctionCode { LocalOrImport::Import(import_mem_index) => { let mem_desc = &module_info.imported_memories[import_mem_index].1; match mem_desc.memory_type() { - //MemoryType::Dynamic => self.native_trampolines.memory_grow_dynamic_import, - MemoryType::Dynamic => unimplemented!(), + MemoryType::Dynamic => self.native_trampolines.memory_grow_dynamic_import, MemoryType::Static => self.native_trampolines.memory_grow_static_import, MemoryType::SharedStatic => { self.native_trampolines.memory_grow_shared_import @@ -4868,6 +5029,7 @@ impl FunctionCodeGenerator for X64FunctionCode { &[WpType::I32], &[WpType::I32], )?; + Self::emit_update_memory_from_ctx(assembler, module_info)?; } _ => { panic!("{:?}", op); diff --git a/lib/runtime-core/src/types.rs b/lib/runtime-core/src/types.rs index b84f29683aa..c69d0edb02b 100644 --- a/lib/runtime-core/src/types.rs +++ b/lib/runtime-core/src/types.rs @@ -239,8 +239,8 @@ impl MemoryDescriptor { pub fn memory_type(self) -> MemoryType { match (self.maximum.is_some(), self.shared) { (true, true) => MemoryType::SharedStatic, - (true, false) | (false, false) => MemoryType::Static, - //(false, false) => MemoryType::Dynamic, + (true, false) => MemoryType::Static, + (false, false) => MemoryType::Dynamic, (false, true) => panic!("shared memory without a max is not allowed"), } } From 4256ccba92bbecdec07cfac1b2147969b8802186 Mon Sep 17 00:00:00 2001 From: losfair Date: Tue, 19 Mar 2019 21:30:26 +0800 Subject: [PATCH 093/100] Cleanup & fix need_check condition. 
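The previous commit left the run-time bound check enabled unconditionally
(`need_check || true`); drop the override so the check is emitted only for
dynamic memories, and silence the now-unused `module_info` parameter in
`check_precondition`. A sketch of the decision being restored (the import path
for `MemoryType` is an assumption; the logic mirrors
`emit_memory_bound_check_if_needed`):

    use wasmer_runtime_core::types::MemoryType;

    /// Only dynamically-growing memories get the generated bounds check;
    /// static and shared-static memories have a fixed upper bound.
    fn needs_runtime_bound_check(ty: MemoryType) -> bool {
        match ty {
            MemoryType::Dynamic => true,
            MemoryType::Static | MemoryType::SharedStatic => false,
        }
    }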
--- lib/dynasm-backend/src/codegen_x64.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index 4286be9d5c3..7a546e9b87f 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -560,7 +560,7 @@ impl X64ModuleCodeGenerator { impl ModuleCodeGenerator for X64ModuleCodeGenerator { - fn check_precondition(&mut self, module_info: &ModuleInfo) -> Result<(), CodegenError> { + fn check_precondition(&mut self, _module_info: &ModuleInfo) -> Result<(), CodegenError> { Ok(()) } @@ -1788,7 +1788,7 @@ impl X64FunctionCode { MemoryType::Dynamic => true, MemoryType::Static | MemoryType::SharedStatic => false, }; - if need_check || true { + if need_check { dynasm!( assembler ; movq xmm5, r14 @@ -5110,7 +5110,7 @@ unsafe extern "C" fn call_indirect( mut stack_top: *mut u8, stack_base: *mut u8, vmctx: *mut vm::Ctx, - memory_base: *mut u8, + _memory_base: *mut u8, ) -> u64 { let elem_index = *(stack_top as *mut u32) as usize; stack_top = stack_top.offset(8); From 7ee364a58b7d87c22f3a2b87c8621ead3d11fcc7 Mon Sep 17 00:00:00 2001 From: losfair Date: Tue, 19 Mar 2019 21:31:23 +0800 Subject: [PATCH 094/100] Cargo fmt. --- lib/dynasm-backend/src/codegen_x64.rs | 101 +++++++++++++++++--------- lib/dynasm-backend/src/lib.rs | 5 +- 2 files changed, 69 insertions(+), 37 deletions(-) diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index 7a546e9b87f..7a411af1da1 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -410,7 +410,12 @@ impl ProtectedCaller for X64ExecutionContext { msg: "only one linear memory is supported".into(), }); } - unsafe { ((**(*_vmctx).imported_memories).base, (**(*_vmctx).imported_memories).bound) } + unsafe { + ( + (**(*_vmctx).imported_memories).base, + (**(*_vmctx).imported_memories).bound, + ) + } } else { (::std::ptr::null_mut(), 0) }; @@ -1782,7 +1787,9 @@ impl X64FunctionCode { ) { let mem_desc = match MemoryIndex::new(0).local_or_import(module_info) { LocalOrImport::Local(local_mem_index) => &module_info.memories[local_mem_index], - LocalOrImport::Import(import_mem_index) => &module_info.imported_memories[import_mem_index].1, + LocalOrImport::Import(import_mem_index) => { + &module_info.imported_memories[import_mem_index].1 + } }; let need_check = match mem_desc.memory_type() { MemoryType::Dynamic => true, @@ -1850,7 +1857,12 @@ impl X64FunctionCode { ; pop rax ; mov eax, eax ); - Self::emit_memory_bound_check_if_needed(assembler, module_info, Register::RAX, read_size); + Self::emit_memory_bound_check_if_needed( + assembler, + module_info, + Register::RAX, + read_size, + ); dynasm!( assembler ; add rax, r15 @@ -1899,7 +1911,12 @@ impl X64FunctionCode { assembler ; mov Rd(addr_reg as u8), Rd(addr_reg as u8) ); - Self::emit_memory_bound_check_if_needed(assembler, module_info, addr_reg, write_size); + Self::emit_memory_bound_check_if_needed( + assembler, + module_info, + addr_reg, + write_size, + ); dynasm!( assembler ; add Rq(addr_reg as u8), r15 @@ -1916,7 +1933,12 @@ impl X64FunctionCode { assembler ; mov Rd(addr_reg as u8), Rd(addr_reg as u8) ); - Self::emit_memory_bound_check_if_needed(assembler, module_info, addr_reg, write_size); + Self::emit_memory_bound_check_if_needed( + assembler, + module_info, + addr_reg, + write_size, + ); dynasm!( assembler ; add Rq(addr_reg as u8), r15 @@ -1937,7 +1959,12 @@ impl X64FunctionCode { assembler ; mov ecx, ecx ); - 
Self::emit_memory_bound_check_if_needed(assembler, module_info, Register::RCX, write_size); + Self::emit_memory_bound_check_if_needed( + assembler, + module_info, + Register::RCX, + write_size, + ); dynasm!( assembler ; add rcx, r15 @@ -3512,7 +3539,7 @@ impl FunctionCodeGenerator for X64FunctionCode { }, WpType::I32, module_info, - 4 + 4, )?; } Operator::I32Load8U { memarg } => { @@ -3527,7 +3554,7 @@ impl FunctionCodeGenerator for X64FunctionCode { }, WpType::I32, module_info, - 1 + 1, )?; } Operator::I32Load8S { memarg } => { @@ -3542,7 +3569,7 @@ impl FunctionCodeGenerator for X64FunctionCode { }, WpType::I32, module_info, - 1 + 1, )?; } Operator::I32Load16U { memarg } => { @@ -3557,7 +3584,7 @@ impl FunctionCodeGenerator for X64FunctionCode { }, WpType::I32, module_info, - 2 + 2, )?; } Operator::I32Load16S { memarg } => { @@ -3572,7 +3599,7 @@ impl FunctionCodeGenerator for X64FunctionCode { }, WpType::I32, module_info, - 2 + 2, )?; } Operator::I32Store { memarg } => { @@ -3587,7 +3614,7 @@ impl FunctionCodeGenerator for X64FunctionCode { }, WpType::I32, module_info, - 4 + 4, )?; } Operator::I32Store8 { memarg } => { @@ -3602,7 +3629,7 @@ impl FunctionCodeGenerator for X64FunctionCode { }, WpType::I32, module_info, - 1 + 1, )?; } Operator::I32Store16 { memarg } => { @@ -3617,7 +3644,7 @@ impl FunctionCodeGenerator for X64FunctionCode { }, WpType::I32, module_info, - 2 + 2, )?; } Operator::I64Load { memarg } => { @@ -3632,7 +3659,7 @@ impl FunctionCodeGenerator for X64FunctionCode { }, WpType::I64, module_info, - 8 + 8, )?; } Operator::I64Load8U { memarg } => { @@ -3647,7 +3674,7 @@ impl FunctionCodeGenerator for X64FunctionCode { }, WpType::I64, module_info, - 1 + 1, )?; } Operator::I64Load8S { memarg } => { @@ -3662,7 +3689,7 @@ impl FunctionCodeGenerator for X64FunctionCode { }, WpType::I64, module_info, - 1 + 1, )?; } Operator::I64Load16U { memarg } => { @@ -3677,7 +3704,7 @@ impl FunctionCodeGenerator for X64FunctionCode { }, WpType::I64, module_info, - 2 + 2, )?; } Operator::I64Load16S { memarg } => { @@ -3692,7 +3719,7 @@ impl FunctionCodeGenerator for X64FunctionCode { }, WpType::I64, module_info, - 2 + 2, )?; } Operator::I64Load32U { memarg } => { @@ -3707,7 +3734,7 @@ impl FunctionCodeGenerator for X64FunctionCode { }, WpType::I64, module_info, - 4 + 4, )?; } Operator::I64Load32S { memarg } => { @@ -3722,7 +3749,7 @@ impl FunctionCodeGenerator for X64FunctionCode { }, WpType::I64, module_info, - 4 + 4, )?; } Operator::I64Store { memarg } => { @@ -3737,7 +3764,7 @@ impl FunctionCodeGenerator for X64FunctionCode { }, WpType::I64, module_info, - 8 + 8, )?; } Operator::I64Store8 { memarg } => { @@ -3752,7 +3779,7 @@ impl FunctionCodeGenerator for X64FunctionCode { }, WpType::I64, module_info, - 1 + 1, )?; } Operator::I64Store16 { memarg } => { @@ -3767,7 +3794,7 @@ impl FunctionCodeGenerator for X64FunctionCode { }, WpType::I64, module_info, - 2 + 2, )?; } Operator::I64Store32 { memarg } => { @@ -3782,7 +3809,7 @@ impl FunctionCodeGenerator for X64FunctionCode { }, WpType::I64, module_info, - 4 + 4, )?; } Operator::F32Const { value } => { @@ -3834,7 +3861,7 @@ impl FunctionCodeGenerator for X64FunctionCode { }, WpType::F32, module_info, - 4 + 4, )?; } Operator::F32Store { memarg } => { @@ -3849,7 +3876,7 @@ impl FunctionCodeGenerator for X64FunctionCode { }, WpType::F32, module_info, - 4 + 4, )?; } Operator::F64Load { memarg } => { @@ -3864,7 +3891,7 @@ impl FunctionCodeGenerator for X64FunctionCode { }, WpType::F64, module_info, - 8 + 8, )?; } Operator::F64Store { memarg } => { 
@@ -3879,7 +3906,7 @@ impl FunctionCodeGenerator for X64FunctionCode { }, WpType::F64, module_info, - 8 + 8, )?; } Operator::I32ReinterpretF32 => { @@ -4978,7 +5005,9 @@ impl FunctionCodeGenerator for X64FunctionCode { LocalOrImport::Local(local_mem_index) => { let mem_desc = &module_info.memories[local_mem_index]; match mem_desc.memory_type() { - MemoryType::Dynamic => self.native_trampolines.memory_size_dynamic_local, + MemoryType::Dynamic => { + self.native_trampolines.memory_size_dynamic_local + } MemoryType::Static => self.native_trampolines.memory_size_static_local, MemoryType::SharedStatic => { self.native_trampolines.memory_size_shared_local @@ -4988,7 +5017,9 @@ impl FunctionCodeGenerator for X64FunctionCode { LocalOrImport::Import(import_mem_index) => { let mem_desc = &module_info.imported_memories[import_mem_index].1; match mem_desc.memory_type() { - MemoryType::Dynamic => self.native_trampolines.memory_size_dynamic_import, + MemoryType::Dynamic => { + self.native_trampolines.memory_size_dynamic_import + } MemoryType::Static => self.native_trampolines.memory_size_static_import, MemoryType::SharedStatic => { self.native_trampolines.memory_size_shared_import @@ -5004,7 +5035,9 @@ impl FunctionCodeGenerator for X64FunctionCode { LocalOrImport::Local(local_mem_index) => { let mem_desc = &module_info.memories[local_mem_index]; match mem_desc.memory_type() { - MemoryType::Dynamic => self.native_trampolines.memory_grow_dynamic_local, + MemoryType::Dynamic => { + self.native_trampolines.memory_grow_dynamic_local + } MemoryType::Static => self.native_trampolines.memory_grow_static_local, MemoryType::SharedStatic => { self.native_trampolines.memory_grow_shared_local @@ -5014,7 +5047,9 @@ impl FunctionCodeGenerator for X64FunctionCode { LocalOrImport::Import(import_mem_index) => { let mem_desc = &module_info.imported_memories[import_mem_index].1; match mem_desc.memory_type() { - MemoryType::Dynamic => self.native_trampolines.memory_grow_dynamic_import, + MemoryType::Dynamic => { + self.native_trampolines.memory_grow_dynamic_import + } MemoryType::Static => self.native_trampolines.memory_grow_static_import, MemoryType::SharedStatic => { self.native_trampolines.memory_grow_shared_import diff --git a/lib/dynasm-backend/src/lib.rs b/lib/dynasm-backend/src/lib.rs index 5eb91f8e5fb..1647611bb3e 100644 --- a/lib/dynasm-backend/src/lib.rs +++ b/lib/dynasm-backend/src/lib.rs @@ -24,14 +24,11 @@ mod stack; use crate::codegen::{CodegenError, ModuleCodeGenerator}; use crate::parse::LoadError; -use std::ptr::NonNull; use wasmer_runtime_core::{ - backend::{sys::Memory, Backend, CacheGen, Compiler, FuncResolver, Token}, + backend::{sys::Memory, Backend, CacheGen, Compiler, Token}, cache::{Artifact, Error as CacheError}, error::{CompileError, CompileResult}, module::{ModuleInfo, ModuleInner}, - types::LocalFuncIndex, - vm, }; struct Placeholder; From fd606315a4fb37b03616126f93d5ca54c45d75f2 Mon Sep 17 00:00:00 2001 From: losfair Date: Tue, 19 Mar 2019 22:00:50 +0800 Subject: [PATCH 095/100] Remove '+nightly' in Makefile. 
--- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 46eae3a1982..dbc23c772c8 100644 --- a/Makefile +++ b/Makefile @@ -54,7 +54,7 @@ test-emscripten-nightly: release: # If you are in OS-X, you will need mingw-w64 for cross compiling to windows # brew install mingw-w64 - cargo +nightly build --release + cargo build --release debug-release: cargo build --release --features debug From 82b2034f254b0c2f2f0850ea74827c3f3047bc69 Mon Sep 17 00:00:00 2001 From: losfair Date: Wed, 20 Mar 2019 01:19:50 +0800 Subject: [PATCH 096/100] Run clippy on nightly rust. --- .circleci/config.yml | 3 ++- Makefile | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 42c643c47df..52d22736c0b 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -18,8 +18,9 @@ jobs: - run: name: Install lint deps command: | + rustup toolchain install nightly rustup component add rustfmt - rustup component add clippy + rustup component add clippy --toolchain=nightly || cargo +nightly install --git https://github.com/rust-lang/rust-clippy/ --force clippy - run: name: Execute lints command: | diff --git a/Makefile b/Makefile index dbc23c772c8..871245de23a 100644 --- a/Makefile +++ b/Makefile @@ -28,7 +28,7 @@ integration-tests: release lint: cargo fmt --all -- --check - # cargo clippy --all + cargo +nightly clippy --all precommit: lint test From 8b85099fc85d884016e28639210705813e42c11e Mon Sep 17 00:00:00 2001 From: losfair Date: Wed, 20 Mar 2019 01:33:23 +0800 Subject: [PATCH 097/100] Unset global git redirection. --- .circleci/config.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.circleci/config.yml b/.circleci/config.yml index 52d22736c0b..f2616d0d3fb 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -18,6 +18,7 @@ jobs: - run: name: Install lint deps command: | + git config --global --unset url."ssh://git@github.com".insteadOf || true rustup toolchain install nightly rustup component add rustfmt rustup component add clippy --toolchain=nightly || cargo +nightly install --git https://github.com/rust-lang/rust-clippy/ --force clippy From 295efbf3a9fdbfd56edcc669c810fe22b14c152b Mon Sep 17 00:00:00 2001 From: losfair Date: Wed, 20 Mar 2019 01:52:00 +0800 Subject: [PATCH 098/100] Fix clippy errors. --- lib/dynasm-backend/src/codegen_x64.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index 7a411af1da1..d815c704246 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -1,3 +1,5 @@ +#![allow(clippy::forget_copy)] // Used by dynasm. + use super::codegen::*; use super::stack::{ ControlFrame, ControlStack, IfElseState, ScratchRegister, ValueInfo, ValueLocation, ValueStack, @@ -5139,6 +5141,7 @@ enum CallIndirectLocalOrImport { Import, } +#[allow(clippy::cast_ptr_alignment)] unsafe extern "C" fn call_indirect( sig_index: usize, local_or_import: CallIndirectLocalOrImport, @@ -5214,6 +5217,7 @@ unsafe extern "C" fn _memory_size( ret.0 as u32 as u64 } +#[allow(clippy::cast_ptr_alignment)] unsafe extern "C" fn _memory_grow( op: MemoryKind, index: usize, From cb3846ff6a0cb4facd8deb131af214d1eacad233 Mon Sep 17 00:00:00 2001 From: losfair Date: Thu, 21 Mar 2019 08:39:06 +0800 Subject: [PATCH 099/100] Add `InternalCtx`. 
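Group the fields that generated code reads by raw offset (memories, tables,
globals, their imported counterparts, imported_funcs and dynamic_sigindices)
into a #[repr(C)] `InternalCtx` and make it the first field of `Ctx`. Because it
comes first, a `*mut Ctx` can be reinterpreted as a `*mut InternalCtx`, so the
backend's `=> vm::InternalCtx.field` offsets stay valid while the runtime-facing
part of `Ctx` can change without disturbing the JIT's layout assumptions. A
minimal sketch of the cast this relies on (the helper is illustrative; the
follow-up commit applies the same pattern inside the backend):

    use wasmer_runtime_core::vm;

    /// `internal` is the first field of the #[repr(C)] `Ctx`, so the same
    /// pointer can be used to reach the JIT-visible fields.
    unsafe fn memories_ptr(ctx: *mut vm::Ctx) -> *mut *mut vm::LocalMemory {
        (*(ctx as *mut vm::InternalCtx)).memories
    }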
--- lib/dynasm-backend/src/codegen_x64.rs | 38 +++++++++------- lib/runtime-core/src/vm.rs | 64 ++++++++++++++++----------- lib/runtime-core/src/vmcalls.rs | 22 +++++---- 3 files changed, 76 insertions(+), 48 deletions(-) diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index d815c704246..f8aabf04345 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -405,7 +405,12 @@ impl ProtectedCaller for X64ExecutionContext { msg: "only one linear memory is supported".into(), }); } - unsafe { ((**(*_vmctx).memories).base, (**(*_vmctx).memories).bound) } + unsafe { + ( + (**(*_vmctx).internal.memories).base, + (**(*_vmctx).internal.memories).bound, + ) + } } else if _module.info.imported_memories.len() > 0 { if _module.info.memories.len() != 0 || _module.info.imported_memories.len() != 1 { return Err(RuntimeError::Trap { @@ -414,8 +419,8 @@ impl ProtectedCaller for X64ExecutionContext { } unsafe { ( - (**(*_vmctx).imported_memories).base, - (**(*_vmctx).imported_memories).bound, + (**(*_vmctx).internal.imported_memories).base, + (**(*_vmctx).internal.imported_memories).bound, ) } } else { @@ -1394,7 +1399,7 @@ impl X64FunctionCode { } dynasm!( assembler - ; mov r15, r14 => vm::Ctx.memories + ; mov r15, r14 => vm::InternalCtx.memories ); } else if info.imported_memories.len() > 0 { if info.memories.len() != 0 || info.imported_memories.len() != 1 { @@ -1404,7 +1409,7 @@ impl X64FunctionCode { } dynasm!( assembler - ; mov r15, r14 => vm::Ctx.imported_memories + ; mov r15, r14 => vm::InternalCtx.imported_memories ); } else { return Ok(()); @@ -1477,7 +1482,7 @@ impl X64FunctionCode { } dynasm!( assembler - ; mov rcx, r9 => vm::Ctx.memories + ; mov rcx, r9 => vm::InternalCtx.memories ); true } else if info.imported_memories.len() > 0 { @@ -1488,7 +1493,7 @@ impl X64FunctionCode { } dynasm!( assembler - ; mov rcx, r9 => vm::Ctx.imported_memories + ; mov rcx, r9 => vm::InternalCtx.imported_memories ); true } else { @@ -2101,7 +2106,7 @@ impl FunctionCodeGenerator for X64FunctionCode { if global_index < module_info.imported_globals.len() { dynasm!( assembler - ; mov rax, r14 => vm::Ctx.imported_globals + ; mov rax, r14 => vm::InternalCtx.imported_globals ); } else { global_index -= module_info.imported_globals.len(); @@ -2112,7 +2117,7 @@ impl FunctionCodeGenerator for X64FunctionCode { } dynasm!( assembler - ; mov rax, r14 => vm::Ctx.globals + ; mov rax, r14 => vm::InternalCtx.globals ); } @@ -2139,7 +2144,7 @@ impl FunctionCodeGenerator for X64FunctionCode { dynasm!( assembler ; push rbx - ; mov rbx, r14 => vm::Ctx.imported_globals + ; mov rbx, r14 => vm::InternalCtx.imported_globals ); } else { global_index -= module_info.imported_globals.len(); @@ -2151,7 +2156,7 @@ impl FunctionCodeGenerator for X64FunctionCode { dynasm!( assembler ; push rbx - ; mov rbx, r14 => vm::Ctx.globals + ; mov rbx, r14 => vm::InternalCtx.globals ); } @@ -5129,7 +5134,7 @@ unsafe extern "C" fn invoke_import( _memory_base: *mut u8, ) -> u64 { let vmctx: &mut vm::Ctx = &mut *vmctx; - let import = (*vmctx.imported_funcs.offset(import_id as isize)).func; + let import = (*vmctx.internal.imported_funcs.offset(import_id as isize)).func; CONSTRUCT_STACK_AND_CALL_NATIVE(stack_top, stack_base, vmctx, import) } @@ -5155,15 +5160,18 @@ unsafe extern "C" fn call_indirect( assert!(stack_top as usize <= stack_base as usize); let table: &LocalTable = match local_or_import { - CallIndirectLocalOrImport::Local => &*(*(*vmctx).tables), - 
CallIndirectLocalOrImport::Import => &*(*(*vmctx).imported_tables), + CallIndirectLocalOrImport::Local => &*(*(*vmctx).internal.tables), + CallIndirectLocalOrImport::Import => &*(*(*vmctx).internal.imported_tables), }; if elem_index >= table.count as usize { eprintln!("element index out of bounds"); protect_unix::trigger_trap(); } let anyfunc = &*(table.base as *mut vm::Anyfunc).offset(elem_index as isize); - let dynamic_sigindex = *(*vmctx).dynamic_sigindices.offset(sig_index as isize); + let dynamic_sigindex = *(*vmctx) + .internal + .dynamic_sigindices + .offset(sig_index as isize); if anyfunc.func.is_null() { eprintln!("null anyfunc"); diff --git a/lib/runtime-core/src/vm.rs b/lib/runtime-core/src/vm.rs index 6a5231da4ad..3dcde766fe0 100644 --- a/lib/runtime-core/src/vm.rs +++ b/lib/runtime-core/src/vm.rs @@ -13,6 +13,25 @@ use std::{ffi::c_void, mem, ptr}; #[derive(Debug)] #[repr(C)] pub struct Ctx { + // `internal` must be the first field of `Ctx`. + pub internal: InternalCtx, + + pub(crate) local_functions: *const *const Func, + + local_backing: *mut LocalBacking, + import_backing: *mut ImportBacking, + module: *const ModuleInner, + + pub data: *mut c_void, + pub data_finalizer: Option, +} + +/// The internal context of the currently running WebAssembly instance. +/// +/// +#[derive(Debug)] +#[repr(C)] +pub struct InternalCtx { /// A pointer to an array of locally-defined memories, indexed by `MemoryIndex`. pub memories: *mut *mut LocalMemory, @@ -39,15 +58,6 @@ pub struct Ctx { /// signature id. This is used to allow call-indirect to other /// modules safely. pub dynamic_sigindices: *const SigId, - - pub(crate) local_functions: *const *const Func, - - local_backing: *mut LocalBacking, - import_backing: *mut ImportBacking, - module: *const ModuleInner, - - pub data: *mut c_void, - pub data_finalizer: Option, } impl Ctx { @@ -58,16 +68,18 @@ impl Ctx { module: &ModuleInner, ) -> Self { Self { - memories: local_backing.vm_memories.as_mut_ptr(), - tables: local_backing.vm_tables.as_mut_ptr(), - globals: local_backing.vm_globals.as_mut_ptr(), + internal: InternalCtx { + memories: local_backing.vm_memories.as_mut_ptr(), + tables: local_backing.vm_tables.as_mut_ptr(), + globals: local_backing.vm_globals.as_mut_ptr(), - imported_memories: import_backing.vm_memories.as_mut_ptr(), - imported_tables: import_backing.vm_tables.as_mut_ptr(), - imported_globals: import_backing.vm_globals.as_mut_ptr(), - imported_funcs: import_backing.vm_functions.as_mut_ptr(), + imported_memories: import_backing.vm_memories.as_mut_ptr(), + imported_tables: import_backing.vm_tables.as_mut_ptr(), + imported_globals: import_backing.vm_globals.as_mut_ptr(), + imported_funcs: import_backing.vm_functions.as_mut_ptr(), - dynamic_sigindices: local_backing.dynamic_sigindices.as_ptr(), + dynamic_sigindices: local_backing.dynamic_sigindices.as_ptr(), + }, local_functions: local_backing.local_functions.as_ptr(), local_backing, @@ -88,16 +100,18 @@ impl Ctx { data_finalizer: extern "C" fn(*mut c_void), ) -> Self { Self { - memories: local_backing.vm_memories.as_mut_ptr(), - tables: local_backing.vm_tables.as_mut_ptr(), - globals: local_backing.vm_globals.as_mut_ptr(), + internal: InternalCtx { + memories: local_backing.vm_memories.as_mut_ptr(), + tables: local_backing.vm_tables.as_mut_ptr(), + globals: local_backing.vm_globals.as_mut_ptr(), - imported_memories: import_backing.vm_memories.as_mut_ptr(), - imported_tables: import_backing.vm_tables.as_mut_ptr(), - imported_globals: import_backing.vm_globals.as_mut_ptr(), - 
imported_funcs: import_backing.vm_functions.as_mut_ptr(), + imported_memories: import_backing.vm_memories.as_mut_ptr(), + imported_tables: import_backing.vm_tables.as_mut_ptr(), + imported_globals: import_backing.vm_globals.as_mut_ptr(), + imported_funcs: import_backing.vm_functions.as_mut_ptr(), - dynamic_sigindices: local_backing.dynamic_sigindices.as_ptr(), + dynamic_sigindices: local_backing.dynamic_sigindices.as_ptr(), + }, local_functions: local_backing.local_functions.as_ptr(), local_backing, diff --git a/lib/runtime-core/src/vmcalls.rs b/lib/runtime-core/src/vmcalls.rs index b428fb24e11..4126024bf83 100644 --- a/lib/runtime-core/src/vmcalls.rs +++ b/lib/runtime-core/src/vmcalls.rs @@ -17,7 +17,7 @@ pub unsafe extern "C" fn local_static_memory_grow( memory_index: LocalMemoryIndex, delta: Pages, ) -> i32 { - let local_memory = *ctx.memories.add(memory_index.index()); + let local_memory = *ctx.internal.memories.add(memory_index.index()); let memory = (*local_memory).memory as *mut StaticMemory; match (*memory).grow(delta, &mut *local_memory) { @@ -30,7 +30,7 @@ pub unsafe extern "C" fn local_static_memory_size( ctx: &vm::Ctx, memory_index: LocalMemoryIndex, ) -> Pages { - let local_memory = *ctx.memories.add(memory_index.index()); + let local_memory = *ctx.internal.memories.add(memory_index.index()); let memory = (*local_memory).memory as *mut StaticMemory; (*memory).size() @@ -41,7 +41,7 @@ pub unsafe extern "C" fn local_dynamic_memory_grow( memory_index: LocalMemoryIndex, delta: Pages, ) -> i32 { - let local_memory = *ctx.memories.add(memory_index.index()); + let local_memory = *ctx.internal.memories.add(memory_index.index()); let memory = (*local_memory).memory as *mut DynamicMemory; match (*memory).grow(delta, &mut *local_memory) { @@ -54,7 +54,7 @@ pub unsafe extern "C" fn local_dynamic_memory_size( ctx: &vm::Ctx, memory_index: LocalMemoryIndex, ) -> Pages { - let local_memory = *ctx.memories.add(memory_index.index()); + let local_memory = *ctx.internal.memories.add(memory_index.index()); let memory = (*local_memory).memory as *mut DynamicMemory; (*memory).size() @@ -69,7 +69,10 @@ pub unsafe extern "C" fn imported_static_memory_grow( import_memory_index: ImportedMemoryIndex, delta: Pages, ) -> i32 { - let local_memory = *ctx.imported_memories.add(import_memory_index.index()); + let local_memory = *ctx + .internal + .imported_memories + .add(import_memory_index.index()); let memory = (*local_memory).memory as *mut StaticMemory; match (*memory).grow(delta, &mut *local_memory) { @@ -82,7 +85,10 @@ pub unsafe extern "C" fn imported_static_memory_size( ctx: &vm::Ctx, import_memory_index: ImportedMemoryIndex, ) -> Pages { - let local_memory = *ctx.imported_memories.add(import_memory_index.index()); + let local_memory = *ctx + .internal + .imported_memories + .add(import_memory_index.index()); let memory = (*local_memory).memory as *mut StaticMemory; (*memory).size() @@ -93,7 +99,7 @@ pub unsafe extern "C" fn imported_dynamic_memory_grow( memory_index: ImportedMemoryIndex, delta: Pages, ) -> i32 { - let local_memory = *ctx.imported_memories.add(memory_index.index()); + let local_memory = *ctx.internal.imported_memories.add(memory_index.index()); let memory = (*local_memory).memory as *mut DynamicMemory; match (*memory).grow(delta, &mut *local_memory) { @@ -106,7 +112,7 @@ pub unsafe extern "C" fn imported_dynamic_memory_size( ctx: &vm::Ctx, memory_index: ImportedMemoryIndex, ) -> Pages { - let local_memory = *ctx.imported_memories.add(memory_index.index()); + let local_memory = 
*ctx.internal.imported_memories.add(memory_index.index()); let memory = (*local_memory).memory as *mut DynamicMemory; (*memory).size() From 26e4278657db492ddc81d3a97762c6247d4af92f Mon Sep 17 00:00:00 2001 From: losfair Date: Thu, 21 Mar 2019 08:57:50 +0800 Subject: [PATCH 100/100] Make the `internal` field private from outside. --- lib/dynasm-backend/src/codegen_x64.rs | 28 +++++++++++++-------------- lib/runtime-core/src/vm.rs | 3 ++- 2 files changed, 16 insertions(+), 15 deletions(-) diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs index f8aabf04345..ef056a69564 100644 --- a/lib/dynasm-backend/src/codegen_x64.rs +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -406,10 +406,8 @@ impl ProtectedCaller for X64ExecutionContext { }); } unsafe { - ( - (**(*_vmctx).internal.memories).base, - (**(*_vmctx).internal.memories).bound, - ) + let vmctx = _vmctx as *mut vm::InternalCtx; + ((**(*vmctx).memories).base, (**(*vmctx).memories).bound) } } else if _module.info.imported_memories.len() > 0 { if _module.info.memories.len() != 0 || _module.info.imported_memories.len() != 1 { @@ -418,9 +416,10 @@ impl ProtectedCaller for X64ExecutionContext { }); } unsafe { + let vmctx = _vmctx as *mut vm::InternalCtx; ( - (**(*_vmctx).internal.imported_memories).base, - (**(*_vmctx).internal.imported_memories).bound, + (**(*vmctx).imported_memories).base, + (**(*vmctx).imported_memories).bound, ) } } else { @@ -5130,13 +5129,13 @@ unsafe extern "C" fn invoke_import( import_id: usize, stack_top: *mut u8, stack_base: *mut u8, - vmctx: *mut vm::Ctx, + _vmctx: *mut vm::Ctx, _memory_base: *mut u8, ) -> u64 { - let vmctx: &mut vm::Ctx = &mut *vmctx; - let import = (*vmctx.internal.imported_funcs.offset(import_id as isize)).func; + let vmctx: &mut vm::InternalCtx = &mut *(_vmctx as *mut vm::InternalCtx); + let import = (*vmctx.imported_funcs.offset(import_id as isize)).func; - CONSTRUCT_STACK_AND_CALL_NATIVE(stack_top, stack_base, vmctx, import) + CONSTRUCT_STACK_AND_CALL_NATIVE(stack_top, stack_base, _vmctx, import) } #[repr(u64)] @@ -5160,16 +5159,17 @@ unsafe extern "C" fn call_indirect( assert!(stack_top as usize <= stack_base as usize); let table: &LocalTable = match local_or_import { - CallIndirectLocalOrImport::Local => &*(*(*vmctx).internal.tables), - CallIndirectLocalOrImport::Import => &*(*(*vmctx).internal.imported_tables), + CallIndirectLocalOrImport::Local => &*(*(*(vmctx as *mut vm::InternalCtx)).tables), + CallIndirectLocalOrImport::Import => { + &*(*(*(vmctx as *mut vm::InternalCtx)).imported_tables) + } }; if elem_index >= table.count as usize { eprintln!("element index out of bounds"); protect_unix::trigger_trap(); } let anyfunc = &*(table.base as *mut vm::Anyfunc).offset(elem_index as isize); - let dynamic_sigindex = *(*vmctx) - .internal + let dynamic_sigindex = *(*(vmctx as *mut vm::InternalCtx)) .dynamic_sigindices .offset(sig_index as isize); diff --git a/lib/runtime-core/src/vm.rs b/lib/runtime-core/src/vm.rs index 3dcde766fe0..8d2721bdd6a 100644 --- a/lib/runtime-core/src/vm.rs +++ b/lib/runtime-core/src/vm.rs @@ -14,7 +14,7 @@ use std::{ffi::c_void, mem, ptr}; #[repr(C)] pub struct Ctx { // `internal` must be the first field of `Ctx`. - pub internal: InternalCtx, + pub(crate) internal: InternalCtx, pub(crate) local_functions: *const *const Func, @@ -29,6 +29,7 @@ pub struct Ctx { /// The internal context of the currently running WebAssembly instance. /// /// +#[doc(hidden)] #[derive(Debug)] #[repr(C)] pub struct InternalCtx {