diff --git a/.circleci/config.yml b/.circleci/config.yml index d99ce0f3007..26ca0005109 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -18,8 +18,10 @@ jobs: - run: name: Install lint deps command: | + git config --global --unset url."ssh://git@github.com".insteadOf || true + rustup toolchain install nightly rustup component add rustfmt - rustup component add clippy + rustup component add clippy --toolchain=nightly || cargo +nightly install --git https://github.com/rust-lang/rust-clippy/ --force clippy - run: name: Execute lints command: | @@ -274,6 +276,9 @@ jobs: - run: | export LLVM_SYS_70_PREFIX="`pwd`/clang+llvm-7.0.0-x86_64-linux-gnu-ubuntu-16.04/" make test + make test-nightly + make test-emscripten + make test-emscripten-nightly - save_cache: paths: - /usr/local/cargo/registry diff --git a/Cargo.lock b/Cargo.lock index a6d14b571b0..e6421e1c05e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,3 +1,5 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
[[package]] name = "aho-corasick" version = "0.6.10" @@ -367,6 +369,30 @@ dependencies = [ "generic-array 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "dynasm" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "owning_ref 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 0.4.27 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 0.6.11 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 0.15.29 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "dynasmrt" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "memmap 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", + "take_mut 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "either" version = "1.5.1" @@ -605,6 +631,15 @@ dependencies = [ "libc 0.2.50 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "memmap" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.50 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "memmap" version = "0.7.0" @@ -670,6 +705,14 @@ dependencies = [ "libc 0.2.50 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "owning_ref" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "stable_deref_trait 1.1.1 
(registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "owning_ref" version = "0.4.0" @@ -1112,6 +1155,11 @@ dependencies = [ "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "take_mut" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "target-lexicon" version = "0.2.0" @@ -1315,6 +1363,18 @@ dependencies = [ "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "wasmer-dynasm-backend" +version = "0.1.0" +dependencies = [ + "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "dynasm 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "dynasmrt 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "wasmer-runtime-core 0.2.1", + "wasmparser 0.28.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "wasmer-emscripten" version = "0.2.1" @@ -1327,6 +1387,7 @@ dependencies = [ "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", "wabt 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)", "wasmer-clif-backend 0.2.0", + "wasmer-dynasm-backend 0.1.0", "wasmer-llvm-backend 0.1.0", "wasmer-runtime-core 0.2.1", ] @@ -1362,6 +1423,7 @@ dependencies = [ "tempfile 3.0.7 (registry+https://github.com/rust-lang/crates.io-index)", "wabt 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)", "wasmer-clif-backend 0.2.0", + "wasmer-dynasm-backend 0.1.0", "wasmer-llvm-backend 0.1.0", "wasmer-runtime-core 0.2.1", ] @@ -1406,6 +1468,7 @@ version = "0.2.0" dependencies = [ "wabt 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)", "wasmer-clif-backend 0.2.0", + "wasmer-dynasm-backend 0.1.0", "wasmer-llvm-backend 0.1.0", "wasmer-runtime-core 0.2.1", ] @@ -1532,6 +1595,8 @@ dependencies = [ "checksum csv 1.0.5 
(registry+https://github.com/rust-lang/crates.io-index)" = "9fd1c44c58078cfbeaf11fbb3eac9ae5534c23004ed770cc4bfb48e658ae4f04" "checksum csv-core 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "fa5cdef62f37e6ffe7d1f07a381bc0db32b7a3ff1cac0de56cb0d81e71f53d65" "checksum digest 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "05f47366984d3ad862010e22c7ce81a7dbcaebbdfb37241a620f8b6596ee135c" +"checksum dynasm 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b77e128faecc4d16cff7cae96c0c9e809f687f748a0dbc4d017996e48240a991" +"checksum dynasmrt 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a4c408a211e7f5762829f5e46bdff0c14bc3b1517a21a4bb781c716bf88b0c68" "checksum either 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c67353c641dc847124ea1902d69bd753dee9bb3beff9aa3662ecf86c971d1fac" "checksum enum-methods 0.0.8 (registry+https://github.com/rust-lang/crates.io-index)" = "7798e7da2d4cb0d6d6fc467e8d6b5bf247e9e989f786dde1732d79899c32bb10" "checksum env_logger 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b61fa891024a945da30a9581546e8cfaf5602c7b3f4c137a2805cf388f92075a" @@ -1562,6 +1627,7 @@ dependencies = [ "checksum lock_api 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "62ebf1391f6acad60e5c8b43706dde4582df75c06698ab44511d15016bc2442c" "checksum log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c84ec4b527950aa83a329754b01dbe3f58361d1c5efacd1f6d68c494d08a17c6" "checksum memchr 2.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2efc7bc57c883d4a4d6e3246905283d8dae951bb3bd32f49d6ef297f546e1c39" +"checksum memmap 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e2ffa2c986de11a9df78620c01eeaaf27d94d3ff02bf81bfcca953102dd0c6ff" "checksum memmap 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b" "checksum memoffset 0.2.1 
(registry+https://github.com/rust-lang/crates.io-index)" = "0f9dc261e2b62d7a622bf416ea3c5245cdd5d9a7fcc428c0d06804dfce1775b3" "checksum nix 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "921f61dc817b379d0834e45d5ec45beaacfae97082090a49c2cf30dcbc30206f" @@ -1570,6 +1636,7 @@ dependencies = [ "checksum nom 4.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "22293d25d3f33a8567cc8a1dc20f40c7eeb761ce83d0fcca059858580790cac3" "checksum num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "0b3a5d7cc97d6d30d8b9bc8fa19bf45349ffe46241e8816f50f62f6d6aaabee1" "checksum num_cpus 1.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1a23f0ed30a54abaa0c7e83b1d2d87ada7c3c23078d1d87815af3e3b6385fbba" +"checksum owning_ref 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "cdf84f41639e037b484f93433aa3897863b561ed65c6e59c7073d7c561710f37" "checksum owning_ref 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "49a4b8ea2179e6a2e27411d3bca09ca6dd630821cf6894c6c7c8467a8ee7ef13" "checksum page_size 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f89ef58b3d32420dbd1a43d2f38ae92f6239ef12bb556ab09ca55445f5a67242" "checksum parking_lot 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ab41b4aed082705d1056416ae4468b6ea99d52599ecf3169b00088d43113e337" @@ -1623,6 +1690,7 @@ dependencies = [ "checksum syn 0.15.29 (registry+https://github.com/rust-lang/crates.io-index)" = "1825685f977249735d510a242a6727b46efe914bb67e38d30c071b1b72b1d5c2" "checksum synom 0.11.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a393066ed9010ebaed60b9eafa373d4b1baac186dd7e008555b0f702b51945b6" "checksum synstructure 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "73687139bf99285483c96ac0add482c3776528beac1d97d444f6e91f203a2015" +"checksum take_mut 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = 
"f764005d11ee5f36500a149ace24e00e3da98b0158b3e2d53a7495660d3f4d60" "checksum target-lexicon 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4af5e2227f0b887d591d3724b796a96eff04226104d872f5b3883fcd427d64b9" "checksum tempfile 3.0.7 (registry+https://github.com/rust-lang/crates.io-index)" = "b86c784c88d98c801132806dadd3819ed29d8600836c4088e855cdf3e178ed8a" "checksum termcolor 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "4096add70612622289f2fdcdbd5086dc81c1e2675e6ae58d6c4f62a16c6d7f2f" diff --git a/Cargo.toml b/Cargo.toml index 468ebb83269..e0d9a91d26e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,12 +25,11 @@ wasmer-clif-backend = { path = "lib/clif-backend" } wasmer-runtime = { path = "lib/runtime" } wasmer-runtime-core = { path = "lib/runtime-core" } wasmer-emscripten = { path = "lib/emscripten" } - -[target.'cfg(not(windows))'.dependencies] wasmer-llvm-backend = { path = "lib/llvm-backend", optional = true } +wasmer-dynasm-backend = { path = "lib/dynasm-backend", optional = true } [workspace] -members = ["lib/clif-backend", "lib/runtime", "lib/runtime-core", "lib/emscripten", "lib/spectests", "lib/win-exception-handler", "lib/runtime-c-api", "lib/llvm-backend"] +members = ["lib/clif-backend", "lib/dynasm-backend", "lib/runtime", "lib/runtime-core", "lib/emscripten", "lib/spectests", "lib/win-exception-handler", "lib/runtime-c-api", "lib/llvm-backend"] [build-dependencies] wabt = "0.7.2" @@ -42,3 +41,4 @@ default = ["fast-tests"] # This feature will allow cargo test to run much faster fast-tests = [] llvm = ["wasmer-llvm-backend"] +dynasm = ["wasmer-dynasm-backend"] \ No newline at end of file diff --git a/Makefile b/Makefile index a9de5a0e4db..c869155b8cc 100644 --- a/Makefile +++ b/Makefile @@ -28,23 +28,29 @@ integration-tests: release lint: cargo fmt --all -- --check - cargo clippy --all + cargo +nightly clippy --all precommit: lint test test: # We use one thread so the emscripten stdouts doesn't collide - cargo test 
--all --exclude wasmer-runtime-c-api --exclude wasmer-emscripten --exclude wasmer-spectests -- $(runargs) + cargo test --all --exclude wasmer-runtime-c-api --exclude wasmer-emscripten --exclude wasmer-spectests --exclude wasmer-dynasm-backend -- $(runargs) # cargo test --all --exclude wasmer-emscripten -- --test-threads=1 $(runargs) cargo test --manifest-path lib/spectests/Cargo.toml --features clif cargo test --manifest-path lib/spectests/Cargo.toml --features llvm cargo build -p wasmer-runtime-c-api cargo test -p wasmer-runtime-c-api -- --nocapture +test-nightly: + cargo test --manifest-path lib/spectests/Cargo.toml --features dynasm + test-emscripten: cargo test --manifest-path lib/emscripten/Cargo.toml --features clif -- --test-threads=1 $(runargs) cargo test --manifest-path lib/emscripten/Cargo.toml --features llvm -- --test-threads=1 $(runargs) +test-emscripten-nightly: + cargo test --manifest-path lib/emscripten/Cargo.toml --features dynasm -- --test-threads=1 $(runargs) + release: # If you are in OS-X, you will need mingw-w64 for cross compiling to windows # brew install mingw-w64 diff --git a/examples/single_pass_tests/br_table.wat b/examples/single_pass_tests/br_table.wat new file mode 100644 index 00000000000..72e8f92fe6d --- /dev/null +++ b/examples/single_pass_tests/br_table.wat @@ -0,0 +1,37 @@ +(module + (func $main (export "main") + (i32.eq (call $test (i32.const 0)) (i32.const 2)) + (i32.eq (call $test (i32.const 1)) (i32.const 0)) + (i32.eq (call $test (i32.const 2)) (i32.const 1)) + (i32.eq (call $test (i32.const 3)) (i32.const 3)) + (i32.eq (call $test (i32.const 4)) (i32.const 3)) + (i32.and) + (i32.and) + (i32.and) + (i32.and) + (i32.const 1) + (i32.eq) + (br_if 0) + (unreachable) + ) + + (func $test (param $p i32) (result i32) + (block + (block + (block + (block + (block + (get_local $p) + (br_table 2 0 1 3) + ) + (return (i32.const 0)) + ) + (return (i32.const 1)) + ) + (return (i32.const 2)) + ) + (return (i32.const 3)) + ) + (unreachable) 
+ ) +) diff --git a/examples/single_pass_tests/call.wat b/examples/single_pass_tests/call.wat new file mode 100644 index 00000000000..2986ac11a47 --- /dev/null +++ b/examples/single_pass_tests/call.wat @@ -0,0 +1,23 @@ +(module + (func $main (export "main") + (local $a i32) + (block + (set_local $a (i32.const 33)) + (i32.const 11) + (call $foo (get_local $a)) + (i32.add) + (i32.const 86) + (i32.eq) + (br_if 0) + (unreachable) + ) + ) + + (func $foo (param $input i32) (result i32) + (local $a i32) + (set_local $a (i32.const 42)) + (get_local $a) + (get_local $input) + (i32.add) + ) +) diff --git a/examples/single_pass_tests/call_indirect.wat b/examples/single_pass_tests/call_indirect.wat new file mode 100644 index 00000000000..019f045e8d4 --- /dev/null +++ b/examples/single_pass_tests/call_indirect.wat @@ -0,0 +1,25 @@ +(module + (type $binop (func (param i32 i32) (result i32))) + (table 1 100 anyfunc) + (elem (i32.const 5) $sub) + (elem (i32.const 10) $add) + + (func $main (export "main") + (if (i32.eq (call_indirect (type $binop) (i32.const 42) (i32.const 1) (i32.const 10)) (i32.const 43)) + (then) + (else unreachable) + ) + (if (i32.eq (call_indirect (type $binop) (i32.const 42) (i32.const 1) (i32.const 5)) (i32.const 41)) + (then) + (else unreachable) + ) + ) + + (func $add (param i32) (param i32) (result i32) + (i32.add (get_local 0) (get_local 1)) + ) + + (func $sub (param i32) (param i32) (result i32) + (i32.sub (get_local 0) (get_local 1)) + ) +) diff --git a/examples/single_pass_tests/div.wat b/examples/single_pass_tests/div.wat new file mode 100644 index 00000000000..3ef5d6e53f1 --- /dev/null +++ b/examples/single_pass_tests/div.wat @@ -0,0 +1,36 @@ +(module + (func $main (export "main") + (i32.const 1) + (if (i32.ne (i32.div_s (i32.const 2) (i32.const -1)) (i32.const -2)) + (then unreachable) + ) + (i32.const 2) + (if (i32.ne (i32.div_u (i32.const 2) (i32.const -1)) (i32.const 0)) + (then unreachable) + ) + (i32.const 3) + (if (i32.ne (i32.div_u 
(i32.const 10) (i32.const 5)) (i32.const 2)) + (then unreachable) + ) + (i32.const 4) + (if (i64.ne (i64.div_s (i64.const 300000000000) (i64.const -1)) (i64.const -300000000000)) + (then unreachable) + ) + (i32.const 5) + (if (i64.ne (i64.div_u (i64.const 300000000000) (i64.const -1)) (i64.const 0)) + (then unreachable) + ) + (i32.const 6) + (if (i64.ne (i64.div_u (i64.const 300000000000) (i64.const 2)) (i64.const 150000000000)) + (then unreachable) + ) + (i32.add) + (i32.add) + (i32.add) + (i32.add) + (i32.add) + (if (i32.ne (i32.const 21)) + (then unreachable) + ) + ) +) diff --git a/examples/single_pass_tests/global.wat b/examples/single_pass_tests/global.wat new file mode 100644 index 00000000000..f06e15302e2 --- /dev/null +++ b/examples/single_pass_tests/global.wat @@ -0,0 +1,26 @@ +(module + (global $g1 (mut i32) (i32.const 0)) + (global $g2 (mut i32) (i32.const 99)) + (func $main (export "main") + (if (i32.eq (get_global $g1) (i32.const 0)) + (then) + (else unreachable) + ) + (if (i32.eq (get_global $g2) (i32.const 99)) + (then) + (else unreachable) + ) + + (set_global $g1 (i32.add (get_global $g1) (i32.const 1))) + (set_global $g2 (i32.sub (get_global $g2) (i32.const 1))) + + (if (i32.eq (get_global $g1) (i32.const 1)) + (then) + (else unreachable) + ) + (if (i32.eq (get_global $g2) (i32.const 98)) + (then) + (else unreachable) + ) + ) +) diff --git a/examples/single_pass_tests/i32.wat b/examples/single_pass_tests/i32.wat new file mode 100644 index 00000000000..66daadcc9c8 --- /dev/null +++ b/examples/single_pass_tests/i32.wat @@ -0,0 +1,44 @@ +(module + (func $main (export "main") (result i32) + (local $v1 i32) + (block + (i32.const 10) + (set_local $v1) + + (i32.const 42) + (get_local $v1) + (i32.add) + (i32.const 53) + (i32.eq) + (br_if 0) + + (i32.const 1) + (i32.const -100) + (i32.const 41) + (i32.lt_s) + (i32.sub) + (br_if 0) + + (i32.const -100) + (i32.const 41) + (i32.lt_u) + (br_if 0) + + (i32.const 1) + (i32.const 100) + (i32.const -41) + 
(i32.gt_s) + (i32.sub) + (br_if 0) + + (i32.const 100) + (i32.const -41) + (i32.gt_u) + (br_if 0) + + (i32.const 0) + (return) + ) + (unreachable) + ) +) diff --git a/examples/single_pass_tests/i64.wat b/examples/single_pass_tests/i64.wat new file mode 100644 index 00000000000..ebe10eb99bd --- /dev/null +++ b/examples/single_pass_tests/i64.wat @@ -0,0 +1,48 @@ +(module + (func $main (export "main") (result i64) + (local $v1 i64) + (block + (i64.const 10) + (set_local $v1) + + (i64.const 42) + (get_local $v1) + (i64.add) + (i64.const 53) + (i64.eq) + (br_if 0) + + (i64.const 1) + (i64.const -100) + (i64.const 41) + (i64.lt_s) + (i64.extend_u/i32) + (i64.sub) + (i32.wrap/i64) + (br_if 0) + + (i64.const -100) + (i64.const 41) + (i64.lt_u) + (br_if 0) + + (i64.const 1) + (i64.const 100) + (i64.const -41) + (i64.gt_s) + (i64.extend_u/i32) + (i64.sub) + (i32.wrap/i64) + (br_if 0) + + (i64.const 100) + (i64.const -41) + (i64.gt_u) + (br_if 0) + + (i64.const 0) + (return) + ) + (unreachable) + ) +) diff --git a/examples/single_pass_tests/if_else.wat b/examples/single_pass_tests/if_else.wat new file mode 100644 index 00000000000..533b8f13df7 --- /dev/null +++ b/examples/single_pass_tests/if_else.wat @@ -0,0 +1,33 @@ +(module + (func $main (export "main") + (local $a i32) + (set_local $a (i32.const 33)) + + (block + (call $foo (if (result i32) (i32.eq (get_local $a) (i32.const 33)) + (then (i32.const 1)) + (else (i32.const 2)) + )) + (i32.eq (i32.const 43)) + (br_if 0) + (unreachable) + ) + (block + (call $foo (if (result i32) (i32.eq (get_local $a) (i32.const 30)) + (then (i32.const 1)) + (else (i32.const 2)) + )) + (i32.eq (i32.const 44)) + (br_if 0) + (unreachable) + ) + ) + + (func $foo (param $input i32) (result i32) + (local $a i32) + (set_local $a (i32.const 42)) + (get_local $a) + (get_local $input) + (i32.add) + ) +) diff --git a/examples/single_pass_tests/loop.wat b/examples/single_pass_tests/loop.wat new file mode 100644 index 00000000000..dfdc1b1d58e --- 
/dev/null +++ b/examples/single_pass_tests/loop.wat @@ -0,0 +1,16 @@ +(module + (func $main (export "main") (result i32) + (local $count i32) + (local $sum i32) + (loop (result i32) + (set_local $count (i32.add (get_local $count) (i32.const 1))) + (set_local $sum (i32.add (get_local $sum) (get_local $count))) + (i32.sub (i32.const 1) (i32.eq + (get_local $count) + (i32.const 100000) + )) + (br_if 0) + (get_local $sum) + ) + ) +) diff --git a/examples/single_pass_tests/memory.wat b/examples/single_pass_tests/memory.wat new file mode 100644 index 00000000000..9c15eb1eae8 --- /dev/null +++ b/examples/single_pass_tests/memory.wat @@ -0,0 +1,90 @@ +(module + (memory 1) + (func $main (export "main") + (call $test_stack_layout) + ) + + (func $test_stack_layout + (local $addr i32) + (set_local $addr (i32.const 16)) + + (i32.store (get_local $addr) (i32.const 10)) + (if (i32.eq (i32.load (i32.const 14)) (i32.const 655360)) + (then) + (else (unreachable)) + ) + + (i32.const 1) + (i32.store (get_local $addr) (i32.const 11)) + (if (i32.eq (i32.load (i32.const 14)) (i32.const 720896)) + (then) + (else (unreachable)) + ) + + (i32.const 1) + (i32.store (get_local $addr) (i32.const 12)) + (if (i32.eq (i32.load (i32.const 14)) (i32.const 786432)) + (then) + (else (unreachable)) + ) + + (i32.const 1) + (i32.store (get_local $addr) (i32.const 13)) + (if (i32.eq (i32.load (i32.const 14)) (i32.const 851968)) + (then) + (else (unreachable)) + ) + + (i32.const 1) + (i32.store (get_local $addr) (i32.const 14)) + (if (i32.eq (i32.load (i32.const 14)) (i32.const 917504)) + (then) + (else (unreachable)) + ) + + (i32.const 1) + (i32.store (get_local $addr) (i32.const 15)) + (if (i32.eq (i32.load (i32.const 14)) (i32.const 983040)) + (then) + (else (unreachable)) + ) + + (i32.const 1) + (i32.store (get_local $addr) (i32.const 16)) + (if (i32.eq (i32.load (i32.const 14)) (i32.const 1048576)) + (then) + (else (unreachable)) + ) + + (i32.const 1) + (i32.store (get_local $addr) (i32.const 17)) + 
(if (i32.eq (i32.load (i32.const 14)) (i32.const 1114112)) + (then) + (else (unreachable)) + ) + + (i32.const 1) + (i32.store (get_local $addr) (i32.const 18)) + (if (i32.eq (i32.load (i32.const 14)) (i32.const 1179648)) + (then) + (else (unreachable)) + ) + + (i32.const 1) + (i32.store (get_local $addr) (i32.const 19)) + (if (i32.eq (i32.load (i32.const 14)) (i32.const 1245184)) + (then) + (else (unreachable)) + ) + + (drop) + (drop) + (drop) + (drop) + (drop) + (drop) + (drop) + (drop) + (drop) + ) +) diff --git a/examples/single_pass_tests/select.wat b/examples/single_pass_tests/select.wat new file mode 100644 index 00000000000..cfdd368486e --- /dev/null +++ b/examples/single_pass_tests/select.wat @@ -0,0 +1,20 @@ +(module + (func $main (export "main") + (if (i32.eq (select + (i32.const 10) + (i32.const 20) + (i32.const 1) + ) (i32.const 10)) + (then) + (else (unreachable)) + ) + (if (i32.eq (select + (i32.const 10) + (i32.const 20) + (i32.const 0) + ) (i32.const 20)) + (then) + (else (unreachable)) + ) + ) +) diff --git a/examples/single_pass_tests/tee_local.wat b/examples/single_pass_tests/tee_local.wat new file mode 100644 index 00000000000..70b9e4737b0 --- /dev/null +++ b/examples/single_pass_tests/tee_local.wat @@ -0,0 +1,11 @@ +(module + (func $main (export "main") + (local $x i32) + (tee_local $x (i32.const 3)) + (i32.add (i32.const 4)) + (if (i32.eq (i32.const 7)) + (then) + (else unreachable) + ) + ) +) diff --git a/examples/single_pass_tests/unwinding.wat b/examples/single_pass_tests/unwinding.wat new file mode 100644 index 00000000000..165179808b0 --- /dev/null +++ b/examples/single_pass_tests/unwinding.wat @@ -0,0 +1,38 @@ +(module + (func $main (export "main") + (i32.const 5) + (block (result i32) + (i32.const 10) + (block + (i32.const 20) + (block + (i32.const 50) + (br 1) + ) + (unreachable) + ) + ) + (i32.add) + (if (i32.eq (i32.const 15)) + (then) + (else unreachable) + ) + + (block (result i32) + (i32.const 10) + (block (result i32) + 
(i32.const 20) + (block + (i32.const 50) + (br 1) + ) + (unreachable) + ) + (i32.add) + ) + (if (i32.eq (i32.const 60)) + (then) + (else unreachable) + ) + ) +) diff --git a/lib/dynasm-backend/Cargo.toml b/lib/dynasm-backend/Cargo.toml new file mode 100644 index 00000000000..ea03ff8cae2 --- /dev/null +++ b/lib/dynasm-backend/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "wasmer-dynasm-backend" +version = "0.1.0" +repository = "https://github.com/wasmerio/wasmer" +description = "Wasmer runtime Dynasm compiler backend" +license = "MIT" +authors = ["The Wasmer Engineering Team "] +edition = "2018" + +[dependencies] +wasmer-runtime-core = { path = "../runtime-core" } +wasmparser = "0.28.0" +dynasm = "0.3.1" +dynasmrt = "0.3.1" +lazy_static = "1.2.0" +byteorder = "1" +nix = "0.13.0" +libc = "0.2.49" diff --git a/lib/dynasm-backend/src/codegen.rs b/lib/dynasm-backend/src/codegen.rs new file mode 100644 index 00000000000..798dea114c0 --- /dev/null +++ b/lib/dynasm-backend/src/codegen.rs @@ -0,0 +1,33 @@ +use wasmer_runtime_core::{ + backend::{FuncResolver, ProtectedCaller}, + module::ModuleInfo, + structures::Map, + types::{FuncIndex, FuncSig, SigIndex}, +}; +use wasmparser::{Operator, Type as WpType}; + +pub trait ModuleCodeGenerator { + fn check_precondition(&mut self, module_info: &ModuleInfo) -> Result<(), CodegenError>; + fn next_function(&mut self) -> Result<&mut FCG, CodegenError>; + fn finalize(self, module_info: &ModuleInfo) -> Result<(PC, FR), CodegenError>; + fn feed_signatures(&mut self, signatures: Map) -> Result<(), CodegenError>; + fn feed_function_signatures( + &mut self, + assoc: Map, + ) -> Result<(), CodegenError>; + fn feed_import_function(&mut self) -> Result<(), CodegenError>; +} + +pub trait FunctionCodeGenerator { + fn feed_return(&mut self, ty: WpType) -> Result<(), CodegenError>; + fn feed_param(&mut self, ty: WpType) -> Result<(), CodegenError>; + fn feed_local(&mut self, ty: WpType, n: usize) -> Result<(), CodegenError>; + fn begin_body(&mut 
self) -> Result<(), CodegenError>; + fn feed_opcode(&mut self, op: Operator, module_info: &ModuleInfo) -> Result<(), CodegenError>; + fn finalize(&mut self) -> Result<(), CodegenError>; +} + +#[derive(Debug)] +pub struct CodegenError { + pub message: &'static str, +} diff --git a/lib/dynasm-backend/src/codegen_x64.rs b/lib/dynasm-backend/src/codegen_x64.rs new file mode 100644 index 00000000000..ef056a69564 --- /dev/null +++ b/lib/dynasm-backend/src/codegen_x64.rs @@ -0,0 +1,5261 @@ +#![allow(clippy::forget_copy)] // Used by dynasm. + +use super::codegen::*; +use super::stack::{ + ControlFrame, ControlStack, IfElseState, ScratchRegister, ValueInfo, ValueLocation, ValueStack, +}; +use crate::protect_unix; +use byteorder::{ByteOrder, LittleEndian}; +use dynasmrt::{ + x64::Assembler, AssemblyOffset, DynamicLabel, DynasmApi, DynasmLabelApi, ExecutableBuffer, +}; +use std::cell::RefCell; +use std::ptr::NonNull; +use std::{any::Any, collections::HashMap, sync::Arc}; +use wasmer_runtime_core::{ + backend::{FuncResolver, ProtectedCaller, Token, UserTrapper}, + error::{RuntimeError, RuntimeResult}, + memory::MemoryType, + module::{ModuleInfo, ModuleInner}, + structures::{Map, TypedIndex}, + types::{ + FuncIndex, FuncSig, ImportedMemoryIndex, LocalFuncIndex, LocalGlobalIndex, + LocalMemoryIndex, LocalOrImport, MemoryIndex, SigIndex, Type, Value, + }, + units::Pages, + vm::{self, ImportBacking, LocalGlobal, LocalMemory, LocalTable}, +}; +use wasmparser::{Operator, Type as WpType}; + +thread_local! { + static CURRENT_EXECUTION_CONTEXT: RefCell> = RefCell::new(Vec::new()); +} + +lazy_static! 
{ + static ref CALL_WASM: unsafe extern "C" fn( + params: *const u8, + params_len: usize, + target: *const u8, + memory_base: *mut u8, + memory_size_pages: usize, + vmctx: *mut vm::Ctx + ) -> i64 = { + let mut assembler = Assembler::new().unwrap(); + let offset = assembler.offset(); + dynasm!( + assembler + ; push rbx + ; push r12 + ; push r13 + ; push r14 + ; push r15 + + ; mov r15, rcx // memory_base + + // Use the upper 16 bits of r15 to store memory size (in pages). This can support memory size up to 4GB. + // Wasmer currently only runs in usermode so here we assume the upper 17 bits of memory base address are all zero. + // FIXME: Change this if want to use this backend in kernel mode. + ; shl r8, 48 + ; or r15, r8 + + ; mov r14, r9 // vmctx + ; lea rax, [>after_call] + ; push rax + ; push rbp + ; mov rbp, rsp + ; sub rsp, rsi // params_len + ; mov rcx, 0 + ; mov r8, rsp + ; _loop: + ; cmp rsi, 0 + ; je >_loop_end + ; mov rax, [rdi] + ; mov [r8], rax + ; add r8, 8 + ; add rdi, 8 + ; sub rsi, 8 + ; jmp <_loop + ; _loop_end: + ; jmp rdx + ; after_call: + ; pop r15 + ; pop r14 + ; pop r13 + ; pop r12 + ; pop rbx + ; ret + ); + let buf = assembler.finalize().unwrap(); + let ret = unsafe { ::std::mem::transmute(buf.ptr(offset)) }; + ::std::mem::forget(buf); + ret + }; + + static ref CONSTRUCT_STACK_AND_CALL_NATIVE: unsafe extern "C" fn (stack_top: *mut u8, stack_base: *mut u8, ctx: *mut vm::Ctx, target: *const vm::Func) -> u64 = { + let mut assembler = Assembler::new().unwrap(); + let offset = assembler.offset(); + dynasm!( + assembler + ; push r15 + ; push r14 + ; push r13 + ; push r12 + ; push r11 + ; push rbp + ; mov rbp, rsp + + ; mov r15, rdi + ; mov r14, rsi + ; mov r13, rdx + ; mov r12, rcx + + ; mov rdi, r13 // ctx + + ; sub r14, 8 + ; cmp r14, r15 + ; jb >stack_ready + + ; mov rsi, [r14] + ; sub r14, 8 + ; cmp r14, r15 + ; jb >stack_ready + + ; mov rdx, [r14] + ; sub r14, 8 + ; cmp r14, r15 + ; jb >stack_ready + + ; mov rcx, [r14] + ; sub r14, 8 + ; cmp 
r14, r15 + ; jb >stack_ready + + ; mov r8, [r14] + ; sub r14, 8 + ; cmp r14, r15 + ; jb >stack_ready + + ; mov r9, [r14] + ; sub r14, 8 + ; cmp r14, r15 + ; jb >stack_ready + + ; mov rax, r14 + ; sub rax, r15 + ; sub rsp, rax + ; sub rsp, 8 + ; mov rax, QWORD 0xfffffffffffffff0u64 as i64 + ; and rsp, rax + ; mov rax, rsp + ; loop_begin: + ; mov r11, [r14] + ; mov [rax], r11 + ; sub r14, 8 + ; add rax, 8 + ; cmp r14, r15 + ; jb >stack_ready + ; jmp Register { + use self::Register::*; + match sr.raw_id() { + 0 => RDI, + 1 => RSI, + 2 => RDX, + 3 => RCX, + 4 => R8, + 5 => R9, + 6 => R10, + 7 => R11, + 8 => RBX, + 9 => R12, + // 10 => R13, // R13 is reserved as temporary register. + // 11 => R14, // R14 is reserved for vmctx. + // 12 => R15, // R15 is reserved for memory base pointer. + _ => unreachable!(), + } + } + + pub fn is_used(&self, stack: &ValueStack) -> bool { + for val in &stack.values { + match val.location { + ValueLocation::Register(x) => { + if Register::from_scratch_reg(x) == *self { + return true; + } + } + ValueLocation::Stack => break, + } + } + + false + } +} + +#[allow(dead_code)] +pub struct NativeTrampolines { + memory_size_dynamic_local: DynamicLabel, + memory_size_static_local: DynamicLabel, + memory_size_shared_local: DynamicLabel, + memory_size_dynamic_import: DynamicLabel, + memory_size_static_import: DynamicLabel, + memory_size_shared_import: DynamicLabel, + memory_grow_dynamic_local: DynamicLabel, + memory_grow_static_local: DynamicLabel, + memory_grow_shared_local: DynamicLabel, + memory_grow_dynamic_import: DynamicLabel, + memory_grow_static_import: DynamicLabel, + memory_grow_shared_import: DynamicLabel, +} + +pub struct X64ModuleCodeGenerator { + functions: Vec, + signatures: Option>>, + function_signatures: Option>>, + function_labels: Option)>>, + assembler: Option, + native_trampolines: Arc, + func_import_count: usize, +} + +pub struct X64FunctionCode { + signatures: Arc>, + function_signatures: Arc>, + native_trampolines: Arc, + + 
begin_offset: AssemblyOffset, + assembler: Option, + function_labels: Option)>>, + br_table_data: Option>>, + returns: Vec, + locals: Vec, + num_params: usize, + current_stack_offset: usize, + value_stack: ValueStack, + control_stack: Option, + unreachable_depth: usize, +} + +enum FuncPtrInner {} +#[repr(transparent)] +#[derive(Copy, Clone, Debug)] +struct FuncPtr(*const FuncPtrInner); +unsafe impl Send for FuncPtr {} +unsafe impl Sync for FuncPtr {} + +pub struct X64ExecutionContext { + code: ExecutableBuffer, + functions: Vec, + signatures: Arc>, + function_signatures: Arc>, + function_pointers: Vec, + _br_table_data: Vec>, + func_import_count: usize, +} + +pub struct X64RuntimeResolver { + _code: ExecutableBuffer, + local_pointers: Vec, +} + +impl X64ExecutionContext { + fn get_runtime_resolver( + &self, + module_info: &ModuleInfo, + ) -> Result { + let mut assembler = Assembler::new().unwrap(); + let mut offsets: Vec = vec![]; + + for i in self.func_import_count..self.function_pointers.len() { + offsets.push(assembler.offset()); + X64FunctionCode::emit_managed_call_trampoline( + &mut assembler, + module_info, + self.function_pointers[i], + self.signatures[self.function_signatures[FuncIndex::new(i)]] + .params() + .len(), + )?; + } + + let code = assembler.finalize().unwrap(); + let local_pointers: Vec = + offsets.iter().map(|x| FuncPtr(code.ptr(*x) as _)).collect(); + + Ok(X64RuntimeResolver { + _code: code, + local_pointers: local_pointers, + }) + } +} + +impl FuncResolver for X64RuntimeResolver { + fn get( + &self, + _module: &ModuleInner, + _local_func_index: LocalFuncIndex, + ) -> Option> { + NonNull::new(self.local_pointers[_local_func_index.index() as usize].0 as *mut vm::Func) + } +} + +impl ProtectedCaller for X64ExecutionContext { + fn call( + &self, + _module: &ModuleInner, + _func_index: FuncIndex, + _params: &[Value], + _import_backing: &ImportBacking, + _vmctx: *mut vm::Ctx, + _: Token, + ) -> RuntimeResult> { + let index = _func_index.index() - 
self.func_import_count; + let ptr = self.code.ptr(self.functions[index].begin_offset); + let return_ty = self.functions[index].returns.last().cloned(); + + if self.functions[index].num_params != _params.len() { + return Err(RuntimeError::Trap { + msg: "param count mismatch".into(), + }); + } + + let f = &self.functions[index]; + let total_size = f.num_params * 8; + + if f.num_params > 0 && f.locals[f.num_params - 1].stack_offset != total_size { + panic!("internal error: inconsistent stack layout"); + } + + let mut param_buf: Vec = vec![0; total_size]; + for i in 0..f.num_params { + let local = &f.locals[i]; + let buf = &mut param_buf[total_size - local.stack_offset..]; + let size = get_size_of_type(&local.ty).unwrap(); + + if is_dword(size) { + match _params[i] { + Value::I32(x) => LittleEndian::write_u32(buf, x as u32), + Value::F32(x) => LittleEndian::write_u32(buf, f32::to_bits(x)), + _ => { + return Err(RuntimeError::Trap { + msg: "signature mismatch".into(), + }); + } + } + } else { + match _params[i] { + Value::I64(x) => LittleEndian::write_u64(buf, x as u64), + Value::F64(x) => LittleEndian::write_u64(buf, f64::to_bits(x)), + _ => { + return Err(RuntimeError::Trap { + msg: "signature mismatch".into(), + }); + } + } + } + } + + let (memory_base, memory_size): (*mut u8, usize) = if _module.info.memories.len() > 0 { + if _module.info.memories.len() != 1 || _module.info.imported_memories.len() != 0 { + return Err(RuntimeError::Trap { + msg: "only one linear memory is supported".into(), + }); + } + unsafe { + let vmctx = _vmctx as *mut vm::InternalCtx; + ((**(*vmctx).memories).base, (**(*vmctx).memories).bound) + } + } else if _module.info.imported_memories.len() > 0 { + if _module.info.memories.len() != 0 || _module.info.imported_memories.len() != 1 { + return Err(RuntimeError::Trap { + msg: "only one linear memory is supported".into(), + }); + } + unsafe { + let vmctx = _vmctx as *mut vm::InternalCtx; + ( + (**(*vmctx).imported_memories).base, + 
(**(*vmctx).imported_memories).bound, + ) + } + } else { + (::std::ptr::null_mut(), 0) + }; + //println!("MEMORY = {:?}", memory_base); + + CURRENT_EXECUTION_CONTEXT.with(|x| x.borrow_mut().push(self)); + + let ret = unsafe { + protect_unix::call_protected(|| { + CALL_WASM( + param_buf.as_ptr(), + param_buf.len(), + ptr, + memory_base, + memory_size.wrapping_shr(16), + _vmctx, + ) + }) + }; + + CURRENT_EXECUTION_CONTEXT.with(|x| x.borrow_mut().pop().unwrap()); + + let ret = ret?; + + Ok(if let Some(ty) = return_ty { + vec![match ty { + WpType::I32 => Value::I32(ret as i32), + WpType::I64 => Value::I64(ret), + WpType::F32 => Value::F32(f32::from_bits(ret as i32 as u32)), + WpType::F64 => Value::F64(f64::from_bits(ret as u64)), + _ => unreachable!(), + }] + } else { + vec![] + }) + } + + fn get_early_trapper(&self) -> Box { + pub struct Trapper; + + impl UserTrapper for Trapper { + unsafe fn do_early_trap(&self, _data: Box) -> ! { + panic!("do_early_trap"); + } + } + + Box::new(Trapper) + } +} + +#[derive(Copy, Clone, Debug)] +struct Local { + ty: WpType, + stack_offset: usize, +} + +impl X64ModuleCodeGenerator { + pub fn new() -> X64ModuleCodeGenerator { + let mut assembler = Assembler::new().unwrap(); + let nt = NativeTrampolines { + memory_size_dynamic_local: X64FunctionCode::emit_native_call_trampoline( + &mut assembler, + _memory_size, + MemoryKind::DynamicLocal, + 0usize, + ), + memory_size_static_local: X64FunctionCode::emit_native_call_trampoline( + &mut assembler, + _memory_size, + MemoryKind::StaticLocal, + 0usize, + ), + memory_size_shared_local: X64FunctionCode::emit_native_call_trampoline( + &mut assembler, + _memory_size, + MemoryKind::SharedLocal, + 0usize, + ), + memory_size_dynamic_import: X64FunctionCode::emit_native_call_trampoline( + &mut assembler, + _memory_size, + MemoryKind::DynamicImport, + 0usize, + ), + memory_size_static_import: X64FunctionCode::emit_native_call_trampoline( + &mut assembler, + _memory_size, + MemoryKind::StaticImport, + 
0usize, + ), + memory_size_shared_import: X64FunctionCode::emit_native_call_trampoline( + &mut assembler, + _memory_size, + MemoryKind::SharedImport, + 0usize, + ), + memory_grow_dynamic_local: X64FunctionCode::emit_native_call_trampoline( + &mut assembler, + _memory_grow, + MemoryKind::DynamicLocal, + 0usize, + ), + memory_grow_static_local: X64FunctionCode::emit_native_call_trampoline( + &mut assembler, + _memory_grow, + MemoryKind::StaticLocal, + 0usize, + ), + memory_grow_shared_local: X64FunctionCode::emit_native_call_trampoline( + &mut assembler, + _memory_grow, + MemoryKind::SharedLocal, + 0usize, + ), + memory_grow_dynamic_import: X64FunctionCode::emit_native_call_trampoline( + &mut assembler, + _memory_grow, + MemoryKind::DynamicImport, + 0usize, + ), + memory_grow_static_import: X64FunctionCode::emit_native_call_trampoline( + &mut assembler, + _memory_grow, + MemoryKind::StaticImport, + 0usize, + ), + memory_grow_shared_import: X64FunctionCode::emit_native_call_trampoline( + &mut assembler, + _memory_grow, + MemoryKind::SharedImport, + 0usize, + ), + }; + + X64ModuleCodeGenerator { + functions: vec![], + signatures: None, + function_signatures: None, + function_labels: Some(HashMap::new()), + assembler: Some(assembler), + native_trampolines: Arc::new(nt), + func_import_count: 0, + } + } +} + +impl ModuleCodeGenerator + for X64ModuleCodeGenerator +{ + fn check_precondition(&mut self, _module_info: &ModuleInfo) -> Result<(), CodegenError> { + Ok(()) + } + + fn next_function(&mut self) -> Result<&mut X64FunctionCode, CodegenError> { + let (mut assembler, mut function_labels, br_table_data) = match self.functions.last_mut() { + Some(x) => ( + x.assembler.take().unwrap(), + x.function_labels.take().unwrap(), + x.br_table_data.take().unwrap(), + ), + None => ( + self.assembler.take().unwrap(), + self.function_labels.take().unwrap(), + vec![], + ), + }; + let begin_offset = assembler.offset(); + let begin_label_info = function_labels + 
.entry(self.functions.len() + self.func_import_count) + .or_insert_with(|| (assembler.new_dynamic_label(), None)); + + begin_label_info.1 = Some(begin_offset); + let begin_label = begin_label_info.0; + + dynasm!( + assembler + ; => begin_label + //; int 3 + ); + let code = X64FunctionCode { + signatures: self.signatures.as_ref().unwrap().clone(), + function_signatures: self.function_signatures.as_ref().unwrap().clone(), + native_trampolines: self.native_trampolines.clone(), + + begin_offset: begin_offset, + assembler: Some(assembler), + function_labels: Some(function_labels), + br_table_data: Some(br_table_data), + returns: vec![], + locals: vec![], + num_params: 0, + current_stack_offset: 0, + value_stack: ValueStack::new(4), // FIXME: Use of R8 and above registers generates incorrect assembly. + control_stack: None, + unreachable_depth: 0, + }; + self.functions.push(code); + Ok(self.functions.last_mut().unwrap()) + } + + fn finalize( + mut self, + module_info: &ModuleInfo, + ) -> Result<(X64ExecutionContext, X64RuntimeResolver), CodegenError> { + let (assembler, mut br_table_data) = match self.functions.last_mut() { + Some(x) => (x.assembler.take().unwrap(), x.br_table_data.take().unwrap()), + None => { + return Err(CodegenError { + message: "no function", + }); + } + }; + let output = assembler.finalize().unwrap(); + + for table in &mut br_table_data { + for entry in table { + *entry = output.ptr(AssemblyOffset(*entry)) as usize; + } + } + + let function_labels = if let Some(x) = self.functions.last() { + x.function_labels.as_ref().unwrap() + } else { + self.function_labels.as_ref().unwrap() + }; + let mut out_labels: Vec = vec![]; + + for i in 0..function_labels.len() { + let (_, offset) = match function_labels.get(&i) { + Some(x) => x, + None => { + return Err(CodegenError { + message: "label not found", + }); + } + }; + let offset = match offset { + Some(x) => x, + None => { + return Err(CodegenError { + message: "offset is none", + }); + } + }; + 
out_labels.push(FuncPtr(output.ptr(*offset) as _)); + } + + let ctx = X64ExecutionContext { + code: output, + functions: self.functions, + _br_table_data: br_table_data, + func_import_count: self.func_import_count, + signatures: match self.signatures { + Some(x) => x, + None => { + return Err(CodegenError { + message: "no signatures", + }); + } + }, + function_pointers: out_labels, + function_signatures: match self.function_signatures { + Some(x) => x, + None => { + return Err(CodegenError { + message: "no function signatures", + }); + } + }, + }; + let resolver = ctx.get_runtime_resolver(module_info)?; + + Ok((ctx, resolver)) + } + + fn feed_signatures(&mut self, signatures: Map) -> Result<(), CodegenError> { + self.signatures = Some(Arc::new(signatures)); + Ok(()) + } + + fn feed_function_signatures( + &mut self, + assoc: Map, + ) -> Result<(), CodegenError> { + self.function_signatures = Some(Arc::new(assoc)); + Ok(()) + } + + fn feed_import_function(&mut self) -> Result<(), CodegenError> { + let labels = match self.function_labels.as_mut() { + Some(x) => x, + None => { + return Err(CodegenError { + message: "got function import after code", + }); + } + }; + let id = labels.len(); + + let offset = self.assembler.as_mut().unwrap().offset(); + + let label = X64FunctionCode::emit_native_call_trampoline( + self.assembler.as_mut().unwrap(), + invoke_import, + 0, + id, + ); + labels.insert(id, (label, Some(offset))); + + self.func_import_count += 1; + + Ok(()) + } +} + +impl X64FunctionCode { + fn gen_rt_pop(assembler: &mut Assembler, info: &ValueInfo) -> Result<(), CodegenError> { + match info.location { + ValueLocation::Register(_) => {} + ValueLocation::Stack => { + dynasm!( + assembler + ; add rsp, 8 + ); + } + } + Ok(()) + } + + fn emit_reinterpret( + value_stack: &mut ValueStack, + in_ty: WpType, + out_ty: WpType, + ) -> Result<(), CodegenError> { + let val = value_stack.pop()?; + if val.ty != in_ty { + return Err(CodegenError { + message: "reinterpret type 
mismatch", + }); + } + value_stack.push(out_ty); + Ok(()) + } + + /// Emits a unary operator. + fn emit_unop( + assembler: &mut Assembler, + value_stack: &mut ValueStack, + f: F, + in_ty: WpType, + out_ty: WpType, + ) -> Result<(), CodegenError> { + let a = value_stack.pop()?; + if a.ty != in_ty { + return Err(CodegenError { + message: "unop(i32) type mismatch", + }); + } + value_stack.push(out_ty); + + match a.location { + ValueLocation::Register(x) => { + let reg = Register::from_scratch_reg(x); + f(assembler, value_stack, reg); + } + ValueLocation::Stack => { + dynasm!( + assembler + ; mov rax, [rsp] + ); + f(assembler, value_stack, Register::RAX); + dynasm!( + assembler + ; mov [rsp], rax + ); + } + } + + Ok(()) + } + + fn emit_unop_i32( + assembler: &mut Assembler, + value_stack: &mut ValueStack, + f: F, + ) -> Result<(), CodegenError> { + Self::emit_unop(assembler, value_stack, f, WpType::I32, WpType::I32) + } + + fn emit_unop_i64( + assembler: &mut Assembler, + value_stack: &mut ValueStack, + f: F, + ) -> Result<(), CodegenError> { + Self::emit_unop(assembler, value_stack, f, WpType::I64, WpType::I64) + } + + /// Emits a binary operator. + /// + /// Guarantees that the first Register parameter to callback `f` will never be `Register::RAX`. + fn emit_binop( + assembler: &mut Assembler, + value_stack: &mut ValueStack, + f: F, + in_ty: WpType, + out_ty: WpType, + ) -> Result<(), CodegenError> { + let (a, b) = value_stack.pop2()?; + if a.ty != in_ty || b.ty != in_ty { + return Err(CodegenError { + message: "binop(i32) type mismatch", + }); + } + value_stack.push(out_ty); + + if a.location.is_register() && b.location.is_register() { + // output is in a_reg. 
+ f( + assembler, + value_stack, + Register::from_scratch_reg(a.location.get_register()?), + Register::from_scratch_reg(b.location.get_register()?), + ); + } else if a.location.is_register() { + dynasm!( + assembler + ; pop rax + ); + f( + assembler, + value_stack, + Register::from_scratch_reg(a.location.get_register()?), + Register::RAX, + ); + } else if b.location.is_register() { + unreachable!(); + } else { + dynasm!( + assembler + ; push rcx + ; mov rcx, [rsp + 16] + ; mov rax, [rsp + 8] + ); + f(assembler, value_stack, Register::RCX, Register::RAX); + dynasm!( + assembler + ; mov [rsp + 16], rcx + ; pop rcx + ; add rsp, 8 + ); + } + + Ok(()) + } + + fn emit_binop_i32( + assembler: &mut Assembler, + value_stack: &mut ValueStack, + f: F, + ) -> Result<(), CodegenError> { + Self::emit_binop(assembler, value_stack, f, WpType::I32, WpType::I32) + } + + fn emit_binop_i64( + assembler: &mut Assembler, + value_stack: &mut ValueStack, + f: F, + ) -> Result<(), CodegenError> { + Self::emit_binop(assembler, value_stack, f, WpType::I64, WpType::I64) + } + + fn emit_shift( + assembler: &mut Assembler, + value_stack: &ValueStack, + left: Register, + right: Register, + f: F, + ) { + let rcx_used = Register::RCX.is_used(value_stack); + if rcx_used { + dynasm!( + assembler + ; push rcx + ); + } + dynasm!( + assembler + ; mov rcx, Rq(right as u8) + ); + f(assembler, left); + if rcx_used { + dynasm!( + assembler + ; pop rcx + ); + } + } + + fn emit_div_i32( + assembler: &mut Assembler, + value_stack: &ValueStack, + left: Register, + right: Register, + signed: bool, + out: Register, + ) { + let dx_save = + Register::RDX.is_used(value_stack) && left != Register::RDX && right != Register::RDX; + if dx_save { + dynasm!( + assembler + ; push rdx + ); + } + + dynasm!( + assembler + ; push r15 + ; mov r15d, Rd(right as u8) + ; mov eax, Rd(left as u8) + ); + if signed { + dynasm!( + assembler + ; cdq + ; idiv r15d + ); + } else { + dynasm!( + assembler + ; xor edx, edx + ; div r15d + ); 
+ } + dynasm!( + assembler + ; mov Rd(left as u8), Rd(out as u8) + ; pop r15 + ); + + if dx_save { + dynasm!( + assembler + ; pop rdx + ); + } + } + + fn emit_div_i64( + assembler: &mut Assembler, + value_stack: &ValueStack, + left: Register, + right: Register, + signed: bool, + out: Register, + ) { + let dx_save = + Register::RDX.is_used(value_stack) && left != Register::RDX && right != Register::RDX; + if dx_save { + dynasm!( + assembler + ; push rdx + ); + } + + dynasm!( + assembler + ; push r15 + ; mov r15, Rq(right as u8) + ; mov rax, Rq(left as u8) + ); + if signed { + dynasm!( + assembler + ; cqo + ; idiv r15 + ); + } else { + dynasm!( + assembler + ; xor rdx, rdx + ; div r15 + ); + } + dynasm!( + assembler + ; mov Rq(left as u8), Rq(out as u8) + ; pop r15 + ); + + if dx_save { + dynasm!( + assembler + ; pop rdx + ); + } + } + + fn emit_cmp_i32( + assembler: &mut Assembler, + left: Register, + right: Register, + f: F, + ) { + dynasm!( + assembler + ; cmp Rd(left as u8), Rd(right as u8) + ); + f(assembler); + dynasm!( + assembler + ; xor Rd(left as u8), Rd(left as u8) + ; jmp >label_end + ; label_true: + ; mov Rd(left as u8), 1 + ; label_end: + ); + } + + fn emit_cmp_i64( + assembler: &mut Assembler, + left: Register, + right: Register, + f: F, + ) { + dynasm!( + assembler + ; cmp Rq(left as u8), Rq(right as u8) + ); + f(assembler); + dynasm!( + assembler + ; xor Rq(left as u8), Rq(left as u8) + ; jmp >label_end + ; label_true: + ; mov Rq(left as u8), 1 + ; label_end: + ); + } + + fn emit_peek_into_ax( + assembler: &mut Assembler, + value_stack: &ValueStack, + ) -> Result { + let val = match value_stack.values.last() { + Some(x) => *x, + None => { + return Err(CodegenError { + message: "no value", + }); + } + }; + match val.location { + ValueLocation::Register(x) => { + let reg = Register::from_scratch_reg(x); + dynasm!( + assembler + ; mov rax, Rq(reg as u8) + ); + } + ValueLocation::Stack => { + dynasm!( + assembler + ; mov rax, [rsp] + ); + } + } + + 
Ok(val.ty) + } + + fn emit_pop_into_reg( + assembler: &mut Assembler, + value_stack: &mut ValueStack, + target: Register, + ) -> Result { + let val = value_stack.pop()?; + match val.location { + ValueLocation::Register(x) => { + let reg = Register::from_scratch_reg(x); + dynasm!( + assembler + ; mov Rq(target as u8), Rq(reg as u8) + ); + } + ValueLocation::Stack => { + dynasm!( + assembler + ; pop Rq(target as u8) + ); + } + } + + Ok(val.ty) + } + + fn emit_pop_into_ax( + assembler: &mut Assembler, + value_stack: &mut ValueStack, + ) -> Result { + Self::emit_pop_into_reg(assembler, value_stack, Register::RAX) + } + + fn emit_push_from_reg( + assembler: &mut Assembler, + value_stack: &mut ValueStack, + ty: WpType, + source: Register, + ) -> Result<(), CodegenError> { + let loc = value_stack.push(ty); + match loc { + ValueLocation::Register(x) => { + let reg = Register::from_scratch_reg(x); + dynasm!( + assembler + ; mov Rq(reg as u8), Rq(source as u8) + ); + } + ValueLocation::Stack => { + dynasm!( + assembler + ; push Rq(source as u8) + ); + } + } + + Ok(()) + } + + fn emit_push_from_ax( + assembler: &mut Assembler, + value_stack: &mut ValueStack, + ty: WpType, + ) -> Result<(), CodegenError> { + Self::emit_push_from_reg(assembler, value_stack, ty, Register::RAX) + } + + fn emit_leave_frame( + assembler: &mut Assembler, + frame: &ControlFrame, + value_stack: &mut ValueStack, + peek: bool, + ) -> Result<(), CodegenError> { + let ret_ty = match frame.returns.len() { + 1 => Some(frame.returns[0]), + 0 => None, + _ => { + return Err(CodegenError { + message: "more than one block returns are not yet supported", + }); + } + }; + + if value_stack.values.len() < frame.value_stack_depth_before + frame.returns.len() { + return Err(CodegenError { + message: "value stack underflow", + }); + } + + if let Some(_) = ret_ty { + if value_stack.values.iter().last().map(|x| x.ty) != ret_ty { + return Err(CodegenError { + message: "value type != return type", + }); + } + if peek { + 
Self::emit_peek_into_ax(assembler, value_stack)?; + } else { + Self::emit_pop_into_ax(assembler, value_stack)?; + } + } + + Ok(()) + } + + fn emit_else( + assembler: &mut Assembler, + control_stack: &mut ControlStack, + value_stack: &mut ValueStack, + was_unreachable: bool, + ) -> Result<(), CodegenError> { + let frame = match control_stack.frames.last_mut() { + Some(x) => x, + None => { + return Err(CodegenError { + message: "no frame (else)", + }); + } + }; + + if !was_unreachable { + Self::emit_leave_frame(assembler, frame, value_stack, false)?; + if value_stack.values.len() != frame.value_stack_depth_before { + return Err(CodegenError { + message: "value_stack.values.len() != frame.value_stack_depth_before", + }); + } + } else { + // No need to actually unwind the stack here. + value_stack.reset_depth(frame.value_stack_depth_before); + } + + match frame.if_else { + IfElseState::If(label) => { + dynasm!( + assembler + ; jmp =>frame.label + ; => label + ); + frame.if_else = IfElseState::Else; + } + _ => { + return Err(CodegenError { + message: "unexpected if else state", + }); + } + } + + Ok(()) + } + + fn emit_block_end( + assembler: &mut Assembler, + control_stack: &mut ControlStack, + value_stack: &mut ValueStack, + was_unreachable: bool, + ) -> Result<(), CodegenError> { + let frame = match control_stack.frames.pop() { + Some(x) => x, + None => { + return Err(CodegenError { + message: "no frame (block end)", + }); + } + }; + + if !was_unreachable { + Self::emit_leave_frame(assembler, &frame, value_stack, false)?; + if value_stack.values.len() != frame.value_stack_depth_before { + return Err(CodegenError { + message: "value_stack.values.len() != frame.value_stack_depth_before", + }); + } + } else { + // No need to actually unwind the stack here. 
+ value_stack.reset_depth(frame.value_stack_depth_before); + } + + if !frame.loop_like { + match frame.if_else { + IfElseState::None | IfElseState::Else => { + dynasm!( + assembler + ; => frame.label + ); + } + IfElseState::If(label) => { + dynasm!( + assembler + ; => frame.label + ; => label + ); + + if frame.returns.len() != 0 { + return Err(CodegenError { + message: "if without else, with non-empty returns", + }); + } + } + } + } + + if frame.returns.len() == 1 { + let loc = value_stack.push(frame.returns[0]); + match loc { + ValueLocation::Register(x) => { + let reg = Register::from_scratch_reg(x); + dynasm!( + assembler + ; mov Rq(reg as u8), rax + ); + } + ValueLocation::Stack => { + dynasm!( + assembler + ; push rax + ); + } + } + } + + Ok(()) + } + + fn emit_jmp( + assembler: &mut Assembler, + control_stack: &ControlStack, + value_stack: &mut ValueStack, + relative_frame_offset: usize, + ) -> Result<(), CodegenError> { + let frame = if relative_frame_offset >= control_stack.frames.len() { + return Err(CodegenError { + message: "jmp offset out of bounds", + }); + } else { + &control_stack.frames[control_stack.frames.len() - 1 - relative_frame_offset] + }; + + if !frame.loop_like { + Self::emit_leave_frame(assembler, frame, value_stack, true)?; + } + + let mut sp_diff: usize = 0; + + for i in 0..value_stack.values.len() - frame.value_stack_depth_before { + let vi = value_stack.values[value_stack.values.len() - 1 - i]; + if vi.location == ValueLocation::Stack { + sp_diff += 8 + } else { + break; + } + } + + dynasm!( + assembler + ; add rsp, sp_diff as i32 + ; jmp =>frame.label + ); + + Ok(()) + } + + fn emit_return( + assembler: &mut Assembler, + value_stack: &mut ValueStack, + returns: &Vec, + ) -> Result<(), CodegenError> { + match returns.len() { + 0 => {} + 1 => { + if value_stack.values.iter().last().map(|x| x.ty) != Some(returns[0]) { + return Err(CodegenError { + message: "self.value_stack.last().cloned() != Some(self.returns[0])", + }); + } + 
Self::emit_pop_into_ax(assembler, value_stack)?; + } + _ => { + return Err(CodegenError { + message: "multiple return values is not yet supported", + }); + } + } + + dynasm!( + assembler + ; mov rsp, rbp + ; pop rbp + ; ret + ); + + Ok(()) + } + + fn emit_update_memory_from_ctx( + assembler: &mut Assembler, + info: &ModuleInfo, + ) -> Result<(), CodegenError> { + if info.memories.len() > 0 { + if info.memories.len() != 1 || info.imported_memories.len() != 0 { + return Err(CodegenError { + message: "only one linear memory is supported", + }); + } + dynasm!( + assembler + ; mov r15, r14 => vm::InternalCtx.memories + ); + } else if info.imported_memories.len() > 0 { + if info.memories.len() != 0 || info.imported_memories.len() != 1 { + return Err(CodegenError { + message: "only one linear memory is supported", + }); + } + dynasm!( + assembler + ; mov r15, r14 => vm::InternalCtx.imported_memories + ); + } else { + return Ok(()); + }; + + dynasm!( + assembler + ; mov r15, [r15] + ; mov r13, r15 => LocalMemory.bound + ; shr r13, 16 // 65536 bytes per page + ; shl r13, 48 + ; mov r15, r15 => LocalMemory.base + ; or r15, r13 + ); + Ok(()) + } + + fn emit_managed_call_trampoline( + assembler: &mut Assembler, + info: &ModuleInfo, + target: FuncPtr, + num_params: usize, + ) -> Result<(), CodegenError> { + dynasm!( + assembler + ; push rbp + ; mov rbp, rsp + ); + + for i in 0..num_params { + match i { + i if i < 5 => { + let reg = match i { + 0 => Register::RSI, + 1 => Register::RDX, + 2 => Register::RCX, + 3 => Register::R8, + 4 => Register::R9, + _ => unreachable!(), + }; + dynasm!( + assembler + ; push Rq(reg as u8) + ); + } + i => { + let offset = (i - 5) * 8; + dynasm!( + assembler + ; mov rax, [rbp + (16 + offset) as i32] + ; push rax + ); + } + } + } + + dynasm!( + assembler + ; mov r9, rdi // vmctx + ; mov rdx, QWORD target.0 as usize as i64 + ; mov rsi, QWORD (num_params * 8) as i64 + ; mov rdi, rsp + ); + + let has_memory = if info.memories.len() > 0 { + if 
info.memories.len() != 1 || info.imported_memories.len() != 0 { + return Err(CodegenError { + message: "only one linear memory is supported", + }); + } + dynasm!( + assembler + ; mov rcx, r9 => vm::InternalCtx.memories + ); + true + } else if info.imported_memories.len() > 0 { + if info.memories.len() != 0 || info.imported_memories.len() != 1 { + return Err(CodegenError { + message: "only one linear memory is supported", + }); + } + dynasm!( + assembler + ; mov rcx, r9 => vm::InternalCtx.imported_memories + ); + true + } else { + false + }; + + if has_memory { + dynasm!( + assembler + ; mov rcx, [rcx] + ; mov r8, rcx => LocalMemory.bound + ; shr r8, 16 // 65536 bytes per page + ; mov rcx, rcx => LocalMemory.base + ); + } else { + dynasm!( + assembler + ; mov rcx, 0 + ); + } + + dynasm!( + assembler + ; mov rax, QWORD *CALL_WASM as usize as i64 + ; call rax + ; mov rsp, rbp + ; pop rbp + ; ret + ); + + Ok(()) + } + + fn emit_f32_int_conv_check( + assembler: &mut Assembler, + reg: Register, + lower_bound: f32, + upper_bound: f32, + ) { + let lower_bound = f32::to_bits(lower_bound); + let upper_bound = f32::to_bits(upper_bound); + + dynasm!( + assembler + ; movq xmm5, r15 + + // underflow + ; movd xmm1, Rd(reg as u8) + ; mov r15d, lower_bound as i32 + ; movd xmm2, r15d + ; vcmpltss xmm0, xmm1, xmm2 + ; movd r15d, xmm0 + ; cmp r15d, 1 + ; je >trap + + // overflow + ; mov r15d, upper_bound as i32 + ; movd xmm2, r15d + ; vcmpgtss xmm0, xmm1, xmm2 + ; movd r15d, xmm0 + ; cmp r15d, 1 + ; je >trap + + // NaN + ; vcmpeqss xmm0, xmm1, xmm1 + ; movd r15d, xmm0 + ; cmp r15d, 0 + ; je >trap + + ; movq r15, xmm5 + ; jmp >ok + + ; trap: + ; ud2 + + ; ok: + ); + } + + fn emit_f64_int_conv_check( + assembler: &mut Assembler, + reg: Register, + lower_bound: f64, + upper_bound: f64, + ) { + let lower_bound = f64::to_bits(lower_bound); + let upper_bound = f64::to_bits(upper_bound); + + dynasm!( + assembler + ; movq xmm5, r15 + + // underflow + ; movq xmm1, Rq(reg as u8) + ; mov r15, 
QWORD lower_bound as i64 + ; movq xmm2, r15 + ; vcmpltsd xmm0, xmm1, xmm2 + ; movd r15d, xmm0 + ; cmp r15d, 1 + ; je >trap + + // overflow + ; mov r15, QWORD upper_bound as i64 + ; movq xmm2, r15 + ; vcmpgtsd xmm0, xmm1, xmm2 + ; movd r15d, xmm0 + ; cmp r15d, 1 + ; je >trap + + // NaN + ; vcmpeqsd xmm0, xmm1, xmm1 + ; movd r15d, xmm0 + ; cmp r15d, 0 + ; je >trap + + ; movq r15, xmm5 + ; jmp >ok + + ; trap: + ; ud2 + + ; ok: + ); + } + + fn emit_native_call_trampoline( + assembler: &mut Assembler, + target: unsafe extern "C" fn( + ctx1: A, + ctx2: B, + stack_top: *mut u8, + stack_base: *mut u8, + vmctx: *mut vm::Ctx, + memory_base: *mut u8, + ) -> u64, + ctx1: A, + ctx2: B, + ) -> DynamicLabel { + let label = assembler.new_dynamic_label(); + + dynasm!( + assembler + ; =>label + ); + + // FIXME: Check at compile time. + assert_eq!(::std::mem::size_of::(), ::std::mem::size_of::()); + assert_eq!(::std::mem::size_of::(), ::std::mem::size_of::()); + + dynasm!( + assembler + ; mov rdi, QWORD unsafe { ::std::mem::transmute_copy::(&ctx1) } + ; mov rsi, QWORD unsafe { ::std::mem::transmute_copy::(&ctx2) } + ; mov rdx, rsp + ; mov rcx, rbp + ; mov r8, r14 // vmctx + ; mov r9, r15 // memory_base + ; mov rax, QWORD 0xfffffffffffffff0u64 as i64 + ; and rsp, rax + ; mov rax, QWORD target as i64 + ; call rax + ; mov rsp, rbp + ; pop rbp + ; ret + ); + + label + } + + fn emit_call_raw( + assembler: &mut Assembler, + value_stack: &mut ValueStack, + target: DynamicLabel, + params: &[WpType], + returns: &[WpType], + ) -> Result<(), CodegenError> { + let total_size: usize = params.len() * 8; + + if params.len() > value_stack.values.len() { + return Err(CodegenError { + message: "value stack underflow in call", + }); + } + + let mut saved_regs: Vec = Vec::new(); + + for v in &value_stack.values[0..value_stack.values.len() - params.len()] { + match v.location { + ValueLocation::Register(x) => { + let reg = Register::from_scratch_reg(x); + dynasm!( + assembler + ; push Rq(reg as u8) + ); 
+ saved_regs.push(reg); + } + ValueLocation::Stack => break, + } + } + + dynasm!( + assembler + ; lea rax, [>after_call] // TODO: Is this correct? + ; push rax + ; push rbp + ); + + if total_size != 0 { + dynasm!( + assembler + ; sub rsp, total_size as i32 + ); + } + + let mut offset: usize = 0; + let mut caller_stack_offset: usize = 0; + for ty in params.iter().rev() { + let val = value_stack.pop()?; + if val.ty != *ty { + return Err(CodegenError { + message: "value type mismatch in call", + }); + } + + match val.location { + ValueLocation::Register(x) => { + let reg = Register::from_scratch_reg(x); + dynasm!( + assembler + ; mov [rsp + offset as i32], Rq(reg as u8) + ); + } + ValueLocation::Stack => { + dynasm!( + assembler + ; mov rax, [rsp + (total_size + 16 + saved_regs.len() * 8 + caller_stack_offset) as i32] + ; mov [rsp + offset as i32], rax + ); + caller_stack_offset += 8; + } + } + + offset += 8; + } + + assert_eq!(offset, total_size); + + dynasm!( + assembler + ; mov rbp, rsp + ); + if total_size != 0 { + dynasm!( + assembler + ; add rbp, total_size as i32 + ); + } + dynasm!( + assembler + ; jmp =>target + ; after_call: + ); + + for reg in saved_regs.iter().rev() { + dynasm!( + assembler + ; pop Rq(*reg as u8) + ); + } + + if caller_stack_offset != 0 { + dynasm!( + assembler + ; add rsp, caller_stack_offset as i32 + ); + } + + match returns.len() { + 0 => {} + 1 => { + Self::emit_push_from_ax(assembler, value_stack, returns[0])?; + } + _ => { + return Err(CodegenError { + message: "more than 1 function returns are not supported", + }); + } + } + + Ok(()) + } + + fn emit_memory_bound_check_if_needed( + assembler: &mut Assembler, + module_info: &ModuleInfo, + offset_reg: Register, + value_size: usize, + ) { + let mem_desc = match MemoryIndex::new(0).local_or_import(module_info) { + LocalOrImport::Local(local_mem_index) => &module_info.memories[local_mem_index], + LocalOrImport::Import(import_mem_index) => { + 
&module_info.imported_memories[import_mem_index].1 + } + }; + let need_check = match mem_desc.memory_type() { + MemoryType::Dynamic => true, + MemoryType::Static | MemoryType::SharedStatic => false, + }; + if need_check { + dynasm!( + assembler + ; movq xmm5, r14 + ; lea r14, [Rq(offset_reg as u8) + value_size as i32] // overflow isn't possible since offset_reg contains a 32-bit value. + + ; mov r13, r15 + ; shr r13, 48 + ; shl r13, 16 + ; cmp r14, r13 + ; ja >out_of_bounds + ; jmp >ok + + ; out_of_bounds: + ; ud2 + ; ok: + ; movq r14, xmm5 + ); + } + } + + fn emit_memory_load( + assembler: &mut Assembler, + value_stack: &mut ValueStack, + f: F, + out_ty: WpType, + module_info: &ModuleInfo, + read_size: usize, + ) -> Result<(), CodegenError> { + let addr_info = value_stack.pop()?; + let out_loc = value_stack.push(out_ty); + + if addr_info.ty != WpType::I32 { + return Err(CodegenError { + message: "memory address must be i32", + }); + } + + assert_eq!(out_loc, addr_info.location); + + match addr_info.location { + ValueLocation::Register(x) => { + let reg = Register::from_scratch_reg(x); + dynasm!( + assembler + ; mov Rd(reg as u8), Rd(reg as u8) + ); + Self::emit_memory_bound_check_if_needed(assembler, module_info, reg, read_size); + dynasm!( + assembler + ; add Rq(reg as u8), r15 + ; shl Rq(reg as u8), 16 + ; shr Rq(reg as u8), 16 + ); + f(assembler, reg); + } + ValueLocation::Stack => { + dynasm!( + assembler + ; pop rax + ; mov eax, eax + ); + Self::emit_memory_bound_check_if_needed( + assembler, + module_info, + Register::RAX, + read_size, + ); + dynasm!( + assembler + ; add rax, r15 + ; shl rax, 16 + ; shr rax, 16 + ); + f(assembler, Register::RAX); + dynasm!( + assembler + ; push rax + ) + } + } + Ok(()) + } + + fn emit_memory_store( + assembler: &mut Assembler, + value_stack: &mut ValueStack, + f: F, + value_ty: WpType, + module_info: &ModuleInfo, + write_size: usize, + ) -> Result<(), CodegenError> { + let value_info = value_stack.pop()?; + let addr_info = 
value_stack.pop()?; + + if addr_info.ty != WpType::I32 { + return Err(CodegenError { + message: "memory address must be i32", + }); + } + + if value_info.ty != value_ty { + return Err(CodegenError { + message: "value type mismatch in memory store", + }); + } + + match value_info.location { + ValueLocation::Register(x) => { + let value_reg = Register::from_scratch_reg(x); + let addr_reg = + Register::from_scratch_reg(addr_info.location.get_register().unwrap()); // must be a register + dynasm!( + assembler + ; mov Rd(addr_reg as u8), Rd(addr_reg as u8) + ); + Self::emit_memory_bound_check_if_needed( + assembler, + module_info, + addr_reg, + write_size, + ); + dynasm!( + assembler + ; add Rq(addr_reg as u8), r15 + ; shl Rq(addr_reg as u8), 16 + ; shr Rq(addr_reg as u8), 16 + ); + f(assembler, addr_reg, value_reg); + } + ValueLocation::Stack => { + match addr_info.location { + ValueLocation::Register(x) => { + let addr_reg = Register::from_scratch_reg(x); + dynasm!( + assembler + ; mov Rd(addr_reg as u8), Rd(addr_reg as u8) + ); + Self::emit_memory_bound_check_if_needed( + assembler, + module_info, + addr_reg, + write_size, + ); + dynasm!( + assembler + ; add Rq(addr_reg as u8), r15 + ; shl Rq(addr_reg as u8), 16 + ; shr Rq(addr_reg as u8), 16 + ; pop rax + ); + f(assembler, addr_reg, Register::RAX); + } + ValueLocation::Stack => { + dynasm!( + assembler + ; mov [rsp - 8], rcx // red zone + ; pop rax // value + ; pop rcx // address + ); + dynasm!( + assembler + ; mov ecx, ecx + ); + Self::emit_memory_bound_check_if_needed( + assembler, + module_info, + Register::RCX, + write_size, + ); + dynasm!( + assembler + ; add rcx, r15 + ; shl rcx, 16 + ; shr rcx, 16 + ); + f(assembler, Register::RCX, Register::RAX); + dynasm!( + assembler + ; mov rcx, [rsp - 24] + ); + } + } + } + } + Ok(()) + } +} + +impl FunctionCodeGenerator for X64FunctionCode { + fn feed_return(&mut self, ty: WpType) -> Result<(), CodegenError> { + self.returns.push(ty); + Ok(()) + } + + /// Stack layout of 
a call frame: + /// - Return address + /// - Old RBP + /// - Params in reversed order, caller initialized + /// - Locals in reversed order, callee initialized + fn feed_param(&mut self, ty: WpType) -> Result<(), CodegenError> { + self.current_stack_offset += 8; + self.locals.push(Local { + ty: ty, + stack_offset: self.current_stack_offset, + }); + + self.num_params += 1; + + Ok(()) + } + + fn feed_local(&mut self, ty: WpType, n: usize) -> Result<(), CodegenError> { + let assembler = self.assembler.as_mut().unwrap(); + let size = get_size_of_type(&ty)?; + + if is_dword(size) { + for _ in 0..n { + // FIXME: check range of n + self.current_stack_offset += 4; + self.locals.push(Local { + ty: ty, + stack_offset: self.current_stack_offset, + }); + dynasm!( + assembler + ; sub rsp, 4 + ; mov DWORD [rsp], 0 + ); + } + if n % 2 == 1 { + self.current_stack_offset += 4; + dynasm!( + assembler + ; sub rsp, 4 + ); + } + } else { + for _ in 0..n { + // FIXME: check range of n + self.current_stack_offset += 8; + self.locals.push(Local { + ty: ty, + stack_offset: self.current_stack_offset, + }); + dynasm!( + assembler + ; push 0 + ); + } + } + Ok(()) + } + fn begin_body(&mut self) -> Result<(), CodegenError> { + self.control_stack = Some(ControlStack::new( + self.assembler.as_mut().unwrap().new_dynamic_label(), + self.returns.clone(), + )); + Ok(()) + } + fn feed_opcode(&mut self, op: Operator, module_info: &ModuleInfo) -> Result<(), CodegenError> { + let was_unreachable; + + if self.unreachable_depth > 0 { + was_unreachable = true; + match op { + Operator::Block { .. } | Operator::Loop { .. } | Operator::If { .. 
} => { + self.unreachable_depth += 1; + } + Operator::End => { + self.unreachable_depth -= 1; + } + Operator::Else => { + // We are in a reachable true branch + if self.unreachable_depth == 1 { + if let Some(IfElseState::If(_)) = self + .control_stack + .as_ref() + .unwrap() + .frames + .last() + .map(|x| x.if_else) + { + self.unreachable_depth -= 1; + } + } + } + _ => {} + } + if self.unreachable_depth > 0 { + return Ok(()); + } + } else { + was_unreachable = false; + } + + let assembler = self.assembler.as_mut().unwrap(); + + match op { + Operator::GetGlobal { global_index } => { + let mut global_index = global_index as usize; + if global_index < module_info.imported_globals.len() { + dynasm!( + assembler + ; mov rax, r14 => vm::InternalCtx.imported_globals + ); + } else { + global_index -= module_info.imported_globals.len(); + if global_index >= module_info.globals.len() { + return Err(CodegenError { + message: "global out of bounds", + }); + } + dynasm!( + assembler + ; mov rax, r14 => vm::InternalCtx.globals + ); + } + + dynasm!( + assembler + ; mov rax, [rax + (global_index as i32) * 8] + ; mov rax, rax => LocalGlobal.data + ); + Self::emit_push_from_ax( + assembler, + &mut self.value_stack, + type_to_wp_type( + module_info.globals[LocalGlobalIndex::new(global_index)] + .desc + .ty, + ), + )?; + } + Operator::SetGlobal { global_index } => { + let ty = Self::emit_pop_into_ax(assembler, &mut self.value_stack)?; + + let mut global_index = global_index as usize; + if global_index < module_info.imported_globals.len() { + dynasm!( + assembler + ; push rbx + ; mov rbx, r14 => vm::InternalCtx.imported_globals + ); + } else { + global_index -= module_info.imported_globals.len(); + if global_index >= module_info.globals.len() { + return Err(CodegenError { + message: "global out of bounds", + }); + } + dynasm!( + assembler + ; push rbx + ; mov rbx, r14 => vm::InternalCtx.globals + ); + } + + if ty + != type_to_wp_type( + 
module_info.globals[LocalGlobalIndex::new(global_index)] + .desc + .ty, + ) + { + return Err(CodegenError { + message: "type mismatch in SetGlobal", + }); + } + dynasm!( + assembler + ; mov rbx, [rbx + (global_index as i32) * 8] + ; mov rbx => LocalGlobal.data, rax + ; pop rbx + ); + } + Operator::GetLocal { local_index } => { + let local_index = local_index as usize; + if local_index >= self.locals.len() { + return Err(CodegenError { + message: "local out of bounds", + }); + } + let local = self.locals[local_index]; + let location = self.value_stack.push(local.ty); + let size = get_size_of_type(&local.ty)?; + + match location { + ValueLocation::Register(id) => { + if is_dword(size) { + dynasm!( + assembler + ; mov Rd(Register::from_scratch_reg(id) as u8), [rbp - (local.stack_offset as i32)] + ); + } else { + dynasm!( + assembler + ; mov Rq(Register::from_scratch_reg(id) as u8), [rbp - (local.stack_offset as i32)] + ); + } + } + ValueLocation::Stack => { + if is_dword(size) { + dynasm!( + assembler + ; mov eax, [rbp - (local.stack_offset as i32)] + ; push rax + ); + } else { + dynasm!( + assembler + ; mov rax, [rbp - (local.stack_offset as i32)] + ; push rax + ); + } + } + } + } + Operator::SetLocal { local_index } => { + let local_index = local_index as usize; + if local_index >= self.locals.len() { + return Err(CodegenError { + message: "local out of bounds", + }); + } + let local = self.locals[local_index]; + let ty = Self::emit_pop_into_ax(assembler, &mut self.value_stack)?; + if ty != local.ty { + return Err(CodegenError { + message: "SetLocal type mismatch", + }); + } + + if is_dword(get_size_of_type(&ty)?) 
{ + dynasm!( + assembler + ; mov [rbp - (local.stack_offset as i32)], eax + ); + } else { + dynasm!( + assembler + ; mov [rbp - (local.stack_offset as i32)], rax + ); + } + } + Operator::TeeLocal { local_index } => { + let local_index = local_index as usize; + if local_index >= self.locals.len() { + return Err(CodegenError { + message: "local out of bounds", + }); + } + let local = self.locals[local_index]; + let ty = Self::emit_peek_into_ax(assembler, &self.value_stack)?; + if ty != local.ty { + return Err(CodegenError { + message: "TeeLocal type mismatch", + }); + } + + if is_dword(get_size_of_type(&ty)?) { + dynasm!( + assembler + ; mov [rbp - (local.stack_offset as i32)], eax + ); + } else { + dynasm!( + assembler + ; mov [rbp - (local.stack_offset as i32)], rax + ); + } + } + Operator::I32Const { value } => { + let location = self.value_stack.push(WpType::I32); + match location { + ValueLocation::Register(x) => { + let reg = Register::from_scratch_reg(x); + dynasm!( + assembler + ; mov Rd(reg as u8), value + ); + } + ValueLocation::Stack => { + dynasm!( + assembler + ; push value + ); + } + } + } + Operator::I32Add => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + dynasm!( + assembler + ; add Rd(left as u8), Rd(right as u8) + ) + }, + )?; + } + Operator::I32Sub => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + dynasm!( + assembler + ; sub Rd(left as u8), Rd(right as u8) + ) + }, + )?; + } + Operator::I32Mul => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + dynasm!( + assembler + ; imul Rd(left as u8), Rd(right as u8) + ) + }, + )?; + } + Operator::I32DivU => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_div_i32( + assembler, + value_stack, + left, + right, + false, + Register::RAX, + ); + }, + )?; + } + 
Operator::I32DivS => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_div_i32( + assembler, + value_stack, + left, + right, + true, + Register::RAX, + ); + }, + )?; + } + Operator::I32RemU => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_div_i32( + assembler, + value_stack, + left, + right, + false, + Register::RDX, + ); + }, + )?; + } + Operator::I32RemS => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_div_i32( + assembler, + value_stack, + left, + right, + true, + Register::RDX, + ); + }, + )?; + } + Operator::I32And => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + dynasm!( + assembler + ; and Rd(left as u8), Rd(right as u8) + ); + }, + )?; + } + Operator::I32Or => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + dynasm!( + assembler + ; or Rd(left as u8), Rd(right as u8) + ); + }, + )?; + } + Operator::I32Xor => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + dynasm!( + assembler + ; xor Rd(left as u8), Rd(right as u8) + ); + }, + )?; + } + Operator::I32Eq => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + dynasm!( + assembler + ; cmp Rd(left as u8), Rd(right as u8) + ; lahf + ; shr ax, 14 + ; and eax, 1 + ; mov Rd(left as u8), eax + ); + }, + )?; + } + Operator::I32Ne => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + dynasm!( + assembler + ; cmp Rd(left as u8), Rd(right as u8) + ; lahf + ; shr ax, 14 + ; and eax, 1 + ; xor eax, 1 + ; mov Rd(left as u8), eax + ); + }, + )?; + } + Operator::I32Eqz => { + Self::emit_unop_i32( + assembler, + &mut 
self.value_stack, + |assembler, _value_stack, reg| { + dynasm!( + assembler + ; cmp Rd(reg as u8), 0 + ; lahf + ; shr ax, 14 + ; and eax, 1 + ); + if reg != Register::RAX { + dynasm!( + assembler + ; mov Rd(reg as u8), eax + ); + } + }, + )?; + } + Operator::I32Clz => { + Self::emit_unop_i32( + assembler, + &mut self.value_stack, + |assembler, _value_stack, reg| { + dynasm!( + assembler + ; lzcnt Rd(reg as u8), Rd(reg as u8) + ); + }, + )?; + } + Operator::I32Ctz => { + Self::emit_unop_i32( + assembler, + &mut self.value_stack, + |assembler, _value_stack, reg| { + dynasm!( + assembler + ; tzcnt Rd(reg as u8), Rd(reg as u8) + ); + }, + )?; + } + Operator::I32Popcnt => { + Self::emit_unop_i32( + assembler, + &mut self.value_stack, + |assembler, _value_stack, reg| { + dynasm!( + assembler + ; popcnt Rd(reg as u8), Rd(reg as u8) + ); + }, + )?; + } + Operator::I32Shl => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_shift(assembler, value_stack, left, right, |assembler, left| { + dynasm!( + assembler + ; shl Rd(left as u8), cl + ) + }); + }, + )?; + } + Operator::I32ShrU => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_shift(assembler, value_stack, left, right, |assembler, left| { + dynasm!( + assembler + ; shr Rd(left as u8), cl + ) + }); + }, + )?; + } + Operator::I32ShrS => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_shift(assembler, value_stack, left, right, |assembler, left| { + dynasm!( + assembler + ; sar Rd(left as u8), cl + ) + }); + }, + )?; + } + Operator::I32Rotl => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_shift(assembler, value_stack, left, right, |assembler, left| { + dynasm!( + assembler + ; rol Rd(left as u8), cl + ) + }); + }, + )?; + } + Operator::I32Rotr => 
{ + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_shift(assembler, value_stack, left, right, |assembler, left| { + dynasm!( + assembler + ; ror Rd(left as u8), cl + ) + }); + }, + )?; + } + // Comparison operators. + // https://en.wikibooks.org/wiki/X86_Assembly/Control_Flow + // TODO: Is reading flag register directly faster? + Operator::I32LtS => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + Self::emit_cmp_i32(assembler, left, right, |assembler| { + dynasm!( + assembler + ; jl >label_true + ); + }); + }, + )?; + } + Operator::I32LeS => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + Self::emit_cmp_i32(assembler, left, right, |assembler| { + dynasm!( + assembler + ; jle >label_true + ); + }); + }, + )?; + } + Operator::I32GtS => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + Self::emit_cmp_i32(assembler, left, right, |assembler| { + dynasm!( + assembler + ; jg >label_true + ); + }); + }, + )?; + } + Operator::I32GeS => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + Self::emit_cmp_i32(assembler, left, right, |assembler| { + dynasm!( + assembler + ; jge >label_true + ); + }); + }, + )?; + } + Operator::I32LtU => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + Self::emit_cmp_i32(assembler, left, right, |assembler| { + dynasm!( + assembler + ; jb >label_true + ); + }); + }, + )?; + } + Operator::I32LeU => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + Self::emit_cmp_i32(assembler, left, right, |assembler| { + dynasm!( + assembler + ; jbe >label_true + ); + }); + }, + )?; + } + Operator::I32GtU => { + Self::emit_binop_i32( + assembler, + 
&mut self.value_stack, + |assembler, _value_stack, left, right| { + Self::emit_cmp_i32(assembler, left, right, |assembler| { + dynasm!( + assembler + ; ja >label_true + ); + }); + }, + )?; + } + Operator::I32GeU => { + Self::emit_binop_i32( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + Self::emit_cmp_i32(assembler, left, right, |assembler| { + dynasm!( + assembler + ; jae >label_true + ); + }); + }, + )?; + } + Operator::I64Const { value } => { + let location = self.value_stack.push(WpType::I64); + match location { + ValueLocation::Register(x) => { + let reg = Register::from_scratch_reg(x); + dynasm!( + assembler + ; mov Rq(reg as u8), QWORD value + ); + } + ValueLocation::Stack => { + dynasm!( + assembler + ; mov rax, QWORD value + ; push rax + ); + } + } + } + Operator::I64Add => { + Self::emit_binop_i64( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + dynasm!( + assembler + ; add Rq(left as u8), Rq(right as u8) + ) + }, + )?; + } + Operator::I64Sub => { + Self::emit_binop_i64( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + dynasm!( + assembler + ; sub Rq(left as u8), Rq(right as u8) + ) + }, + )?; + } + Operator::I64Mul => { + Self::emit_binop_i64( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + dynasm!( + assembler + ; imul Rq(left as u8), Rq(right as u8) + ) + }, + )?; + } + Operator::I64DivU => { + Self::emit_binop_i64( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_div_i64( + assembler, + value_stack, + left, + right, + false, + Register::RAX, + ); + }, + )?; + } + Operator::I64DivS => { + Self::emit_binop_i64( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_div_i64( + assembler, + value_stack, + left, + right, + true, + Register::RAX, + ); + }, + )?; + } + Operator::I64RemU => { + Self::emit_binop_i64( + assembler, + &mut 
self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_div_i64( + assembler, + value_stack, + left, + right, + false, + Register::RDX, + ); + }, + )?; + } + Operator::I64RemS => { + Self::emit_binop_i64( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_div_i64( + assembler, + value_stack, + left, + right, + true, + Register::RDX, + ); + }, + )?; + } + Operator::I64And => { + Self::emit_binop_i64( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + dynasm!( + assembler + ; and Rq(left as u8), Rq(right as u8) + ); + }, + )?; + } + Operator::I64Or => { + Self::emit_binop_i64( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + dynasm!( + assembler + ; or Rq(left as u8), Rq(right as u8) + ); + }, + )?; + } + Operator::I64Xor => { + Self::emit_binop_i64( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + dynasm!( + assembler + ; xor Rq(left as u8), Rq(right as u8) + ); + }, + )?; + } + Operator::I64Eq => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + dynasm!( + assembler + ; cmp Rq(left as u8), Rq(right as u8) + ; lahf + ; shr ax, 14 + ; and eax, 1 + ; mov Rd(left as u8), eax + ); + }, + WpType::I64, + WpType::I32, + )?; + } + Operator::I64Ne => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + dynasm!( + assembler + ; cmp Rq(left as u8), Rq(right as u8) + ; lahf + ; shr ax, 14 + ; and eax, 1 + ; xor eax, 1 + ; mov Rd(left as u8), eax + ); + }, + WpType::I64, + WpType::I32, + )?; + } + Operator::I64Eqz => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, reg| { + dynasm!( + assembler + ; cmp Rq(reg as u8), 0 + ; lahf + ; shr ax, 14 + ; and eax, 1 + ); + if reg != Register::RAX { + dynasm!( + assembler + ; mov Rd(reg as u8), eax + ); + } + }, + WpType::I64, + 
WpType::I32, + )?; + } + Operator::I64Clz => { + Self::emit_unop_i64( + assembler, + &mut self.value_stack, + |assembler, _value_stack, reg| { + dynasm!( + assembler + ; lzcnt Rq(reg as u8), Rq(reg as u8) + ); + }, + )?; + } + Operator::I64Ctz => { + Self::emit_unop_i64( + assembler, + &mut self.value_stack, + |assembler, _value_stack, reg| { + dynasm!( + assembler + ; tzcnt Rq(reg as u8), Rq(reg as u8) + ); + }, + )?; + } + Operator::I64Popcnt => { + Self::emit_unop_i64( + assembler, + &mut self.value_stack, + |assembler, _value_stack, reg| { + dynasm!( + assembler + ; popcnt Rq(reg as u8), Rq(reg as u8) + ); + }, + )?; + } + Operator::I64Shl => { + Self::emit_binop_i64( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_shift(assembler, value_stack, left, right, |assembler, left| { + dynasm!( + assembler + ; shl Rq(left as u8), cl + ) + }); + }, + )?; + } + Operator::I64ShrU => { + Self::emit_binop_i64( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_shift(assembler, value_stack, left, right, |assembler, left| { + dynasm!( + assembler + ; shr Rq(left as u8), cl + ) + }); + }, + )?; + } + Operator::I64ShrS => { + Self::emit_binop_i64( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_shift(assembler, value_stack, left, right, |assembler, left| { + dynasm!( + assembler + ; sar Rq(left as u8), cl + ) + }); + }, + )?; + } + Operator::I64Rotl => { + Self::emit_binop_i64( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_shift(assembler, value_stack, left, right, |assembler, left| { + dynasm!( + assembler + ; rol Rq(left as u8), cl + ) + }); + }, + )?; + } + Operator::I64Rotr => { + Self::emit_binop_i64( + assembler, + &mut self.value_stack, + |assembler, value_stack, left, right| { + Self::emit_shift(assembler, value_stack, left, right, |assembler, left| { + dynasm!( + assembler + ; ror Rq(left as 
u8), cl + ) + }); + }, + )?; + } + // Comparison operators. + // https://en.wikibooks.org/wiki/X86_Assembly/Control_Flow + // TODO: Is reading flag register directly faster? + Operator::I64LtS => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + Self::emit_cmp_i64(assembler, left, right, |assembler| { + dynasm!( + assembler + ; jl >label_true + ); + }); + }, + WpType::I64, + WpType::I32, + )?; + } + Operator::I64LeS => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + Self::emit_cmp_i64(assembler, left, right, |assembler| { + dynasm!( + assembler + ; jle >label_true + ); + }); + }, + WpType::I64, + WpType::I32, + )?; + } + Operator::I64GtS => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + Self::emit_cmp_i64(assembler, left, right, |assembler| { + dynasm!( + assembler + ; jg >label_true + ); + }); + }, + WpType::I64, + WpType::I32, + )?; + } + Operator::I64GeS => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + Self::emit_cmp_i64(assembler, left, right, |assembler| { + dynasm!( + assembler + ; jge >label_true + ); + }); + }, + WpType::I64, + WpType::I32, + )?; + } + Operator::I64LtU => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + Self::emit_cmp_i64(assembler, left, right, |assembler| { + dynasm!( + assembler + ; jb >label_true + ); + }); + }, + WpType::I64, + WpType::I32, + )?; + } + Operator::I64LeU => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + Self::emit_cmp_i64(assembler, left, right, |assembler| { + dynasm!( + assembler + ; jbe >label_true + ); + }); + }, + WpType::I64, + WpType::I32, + )?; + } + Operator::I64GtU => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + 
Self::emit_cmp_i64(assembler, left, right, |assembler| { + dynasm!( + assembler + ; ja >label_true + ); + }); + }, + WpType::I64, + WpType::I32, + )?; + } + Operator::I64GeU => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + Self::emit_cmp_i64(assembler, left, right, |assembler| { + dynasm!( + assembler + ; jae >label_true + ); + }); + }, + WpType::I64, + WpType::I32, + )?; + } + Operator::I64ExtendSI32 => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, reg| { + dynasm!( + assembler + ; movsx Rq(reg as u8), Rd(reg as u8) + ); + }, + WpType::I32, + WpType::I64, + )?; + } + Operator::I64ExtendUI32 => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |_assembler, _value_stack, _reg| { + // FIXME: Is it correct to do nothing here? + }, + WpType::I32, + WpType::I64, + )?; + } + Operator::I32WrapI64 => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, reg| { + dynasm!( + assembler + ; mov Rd(reg as u8), Rd(reg as u8) // clear upper 32 bits + ); + }, + WpType::I64, + WpType::I32, + )?; + } + Operator::Block { ty } => { + self.control_stack + .as_mut() + .unwrap() + .frames + .push(ControlFrame { + label: assembler.new_dynamic_label(), + loop_like: false, + if_else: IfElseState::None, + returns: match ty { + WpType::EmptyBlockType => vec![], + _ => vec![ty], + }, + value_stack_depth_before: self.value_stack.values.len(), + }); + } + Operator::Unreachable => { + dynasm!( + assembler + ; ud2 + ); + self.unreachable_depth = 1; + } + Operator::Drop => { + let info = self.value_stack.pop()?; + Self::gen_rt_pop(assembler, &info)?; + } + Operator::Return => { + Self::emit_return(assembler, &mut self.value_stack, &self.returns)?; + self.unreachable_depth = 1; + } + Operator::Call { function_index } => { + let function_index = function_index as usize; + let label = self + .function_labels + .as_mut() + .unwrap() + .entry(function_index) + 
.or_insert_with(|| (assembler.new_dynamic_label(), None)) + .0; + let sig_index = match self.function_signatures.get(FuncIndex::new(function_index)) { + Some(x) => *x, + None => { + return Err(CodegenError { + message: "signature not found", + }); + } + }; + let sig = match self.signatures.get(sig_index) { + Some(x) => x, + None => { + return Err(CodegenError { + message: "signature does not exist", + }); + } + }; + let param_types: Vec = + sig.params().iter().cloned().map(type_to_wp_type).collect(); + let return_types: Vec = + sig.returns().iter().cloned().map(type_to_wp_type).collect(); + Self::emit_call_raw( + assembler, + &mut self.value_stack, + label, + ¶m_types, + &return_types, + )?; + } + Operator::CallIndirect { index, table_index } => { + if table_index != 0 { + return Err(CodegenError { + message: "only one table is supported", + }); + } + let local_or_import = if module_info.tables.len() > 0 { + if module_info.tables.len() != 1 || module_info.imported_tables.len() != 0 { + return Err(CodegenError { + message: "only one table is supported", + }); + } + CallIndirectLocalOrImport::Local + } else if module_info.imported_tables.len() > 0 { + if module_info.tables.len() != 0 || module_info.imported_tables.len() != 1 { + return Err(CodegenError { + message: "only one table is supported", + }); + } + CallIndirectLocalOrImport::Import + } else { + return Err(CodegenError { + message: "no tables", + }); + }; + let sig_index = SigIndex::new(index as usize); + let sig = match self.signatures.get(sig_index) { + Some(x) => x, + None => { + return Err(CodegenError { + message: "signature does not exist", + }); + } + }; + let mut param_types: Vec = + sig.params().iter().cloned().map(type_to_wp_type).collect(); + let return_types: Vec = + sig.returns().iter().cloned().map(type_to_wp_type).collect(); + param_types.push(WpType::I32); // element index + + dynasm!( + assembler + ; jmp >after_trampoline + ); + + let trampoline_label = Self::emit_native_call_trampoline( + 
assembler, + call_indirect, + index as usize, + local_or_import, + ); + + dynasm!( + assembler + ; after_trampoline: + ); + + Self::emit_call_raw( + assembler, + &mut self.value_stack, + trampoline_label, + ¶m_types, + &return_types, + )?; + } + Operator::End => { + if self.control_stack.as_ref().unwrap().frames.len() == 1 { + let frame = self.control_stack.as_mut().unwrap().frames.pop().unwrap(); + + if !was_unreachable { + Self::emit_leave_frame(assembler, &frame, &mut self.value_stack, false)?; + } else { + self.value_stack.reset_depth(0); + } + + dynasm!( + assembler + ; =>frame.label + ); + } else { + Self::emit_block_end( + assembler, + self.control_stack.as_mut().unwrap(), + &mut self.value_stack, + was_unreachable, + )?; + } + } + Operator::Loop { ty } => { + let label = assembler.new_dynamic_label(); + self.control_stack + .as_mut() + .unwrap() + .frames + .push(ControlFrame { + label: label, + loop_like: true, + if_else: IfElseState::None, + returns: match ty { + WpType::EmptyBlockType => vec![], + _ => vec![ty], + }, + value_stack_depth_before: self.value_stack.values.len(), + }); + dynasm!( + assembler + ; =>label + ); + } + Operator::If { ty } => { + let label_end = assembler.new_dynamic_label(); + let label_else = assembler.new_dynamic_label(); + + Self::emit_pop_into_ax(assembler, &mut self.value_stack)?; // TODO: typeck? 
+ + self.control_stack + .as_mut() + .unwrap() + .frames + .push(ControlFrame { + label: label_end, + loop_like: false, + if_else: IfElseState::If(label_else), + returns: match ty { + WpType::EmptyBlockType => vec![], + _ => vec![ty], + }, + value_stack_depth_before: self.value_stack.values.len(), + }); + dynasm!( + assembler + ; cmp eax, 0 + ; je =>label_else + ); + } + Operator::Else => { + Self::emit_else( + assembler, + self.control_stack.as_mut().unwrap(), + &mut self.value_stack, + was_unreachable, + )?; + } + Operator::Select => { + Self::emit_pop_into_ax(assembler, &mut self.value_stack)?; + let v_b = self.value_stack.pop()?; + let v_a = self.value_stack.pop()?; + + if v_b.ty != v_a.ty { + return Err(CodegenError { + message: "select: type mismatch", + }); + } + + dynasm!( + assembler + ; cmp eax, 0 + ); + match v_b.location { + ValueLocation::Stack => { + dynasm!( + assembler + ; cmove rax, [rsp] + ; add rsp, 8 + ); + } + ValueLocation::Register(x) => { + let reg = Register::from_scratch_reg(x); + dynasm!( + assembler + ; cmove rax, Rq(reg as u8) + ); + } + } + match v_a.location { + ValueLocation::Stack => { + dynasm!( + assembler + ; cmovne rax, [rsp] + ; add rsp, 8 + ); + } + ValueLocation::Register(x) => { + let reg = Register::from_scratch_reg(x); + dynasm!( + assembler + ; cmovne rax, Rq(reg as u8) + ); + } + } + + Self::emit_push_from_ax(assembler, &mut self.value_stack, v_a.ty)?; + } + Operator::Br { relative_depth } => { + Self::emit_jmp( + assembler, + self.control_stack.as_ref().unwrap(), + &mut self.value_stack, + relative_depth as usize, + )?; + self.unreachable_depth = 1; + } + Operator::BrIf { relative_depth } => { + let no_br_label = assembler.new_dynamic_label(); + Self::emit_pop_into_ax(assembler, &mut self.value_stack)?; // TODO: typeck? 
+ dynasm!( + assembler + ; cmp eax, 0 + ; je =>no_br_label + ); + Self::emit_jmp( + assembler, + self.control_stack.as_ref().unwrap(), + &mut self.value_stack, + relative_depth as usize, + )?; + dynasm!( + assembler + ; =>no_br_label + ); + } + Operator::BrTable { table } => { + let (targets, default_target) = match table.read_table() { + Ok(x) => x, + Err(_) => { + return Err(CodegenError { + message: "cannot read br table", + }); + } + }; + let cond_ty = Self::emit_pop_into_ax(assembler, &mut self.value_stack)?; + if cond_ty != WpType::I32 { + return Err(CodegenError { + message: "expecting i32 for BrTable condition", + }); + } + let mut table = vec![0usize; targets.len()]; + dynasm!( + assembler + ; cmp eax, targets.len() as i32 + ; jae >default_br + ; shl rax, 3 + ; push rcx + ; mov rcx, QWORD table.as_ptr() as usize as i64 + ; add rax, rcx + ; pop rcx + ; mov rax, [rax] // assuming upper 32 bits of rax are zeroed + ; jmp rax + ); + for (i, target) in targets.iter().enumerate() { + let AssemblyOffset(offset) = assembler.offset(); + table[i] = offset; + Self::emit_jmp( + assembler, + self.control_stack.as_ref().unwrap(), + &mut self.value_stack, + *target as usize, + )?; // This does not actually modify value_stack. 
+ } + dynasm!( + assembler + ; default_br: + ); + Self::emit_jmp( + assembler, + self.control_stack.as_ref().unwrap(), + &mut self.value_stack, + default_target as usize, + )?; + self.br_table_data.as_mut().unwrap().push(table); + self.unreachable_depth = 1; + } + Operator::I32Load { memarg } => { + Self::emit_memory_load( + assembler, + &mut self.value_stack, + |assembler, reg| { + dynasm!( + assembler + ; mov Rd(reg as u8), [Rq(reg as u8) + memarg.offset as i32] + ); + }, + WpType::I32, + module_info, + 4, + )?; + } + Operator::I32Load8U { memarg } => { + Self::emit_memory_load( + assembler, + &mut self.value_stack, + |assembler, reg| { + dynasm!( + assembler + ; movzx Rd(reg as u8), BYTE [Rq(reg as u8) + memarg.offset as i32] + ); + }, + WpType::I32, + module_info, + 1, + )?; + } + Operator::I32Load8S { memarg } => { + Self::emit_memory_load( + assembler, + &mut self.value_stack, + |assembler, reg| { + dynasm!( + assembler + ; movsx Rd(reg as u8), BYTE [Rq(reg as u8) + memarg.offset as i32] + ); + }, + WpType::I32, + module_info, + 1, + )?; + } + Operator::I32Load16U { memarg } => { + Self::emit_memory_load( + assembler, + &mut self.value_stack, + |assembler, reg| { + dynasm!( + assembler + ; movzx Rd(reg as u8), WORD [Rq(reg as u8) + memarg.offset as i32] + ); + }, + WpType::I32, + module_info, + 2, + )?; + } + Operator::I32Load16S { memarg } => { + Self::emit_memory_load( + assembler, + &mut self.value_stack, + |assembler, reg| { + dynasm!( + assembler + ; movsx Rd(reg as u8), WORD [Rq(reg as u8) + memarg.offset as i32] + ); + }, + WpType::I32, + module_info, + 2, + )?; + } + Operator::I32Store { memarg } => { + Self::emit_memory_store( + assembler, + &mut self.value_stack, + |assembler, addr_reg, value_reg| { + dynasm!( + assembler + ; mov [Rq(addr_reg as u8) + memarg.offset as i32], Rd(value_reg as u8) + ); + }, + WpType::I32, + module_info, + 4, + )?; + } + Operator::I32Store8 { memarg } => { + Self::emit_memory_store( + assembler, + &mut self.value_stack, 
+ |assembler, addr_reg, value_reg| { + dynasm!( + assembler + ; mov [Rq(addr_reg as u8) + memarg.offset as i32], Rb(value_reg as u8) + ); + }, + WpType::I32, + module_info, + 1, + )?; + } + Operator::I32Store16 { memarg } => { + Self::emit_memory_store( + assembler, + &mut self.value_stack, + |assembler, addr_reg, value_reg| { + dynasm!( + assembler + ; mov [Rq(addr_reg as u8) + memarg.offset as i32], Rw(value_reg as u8) + ); + }, + WpType::I32, + module_info, + 2, + )?; + } + Operator::I64Load { memarg } => { + Self::emit_memory_load( + assembler, + &mut self.value_stack, + |assembler, reg| { + dynasm!( + assembler + ; mov Rq(reg as u8), [Rq(reg as u8) + memarg.offset as i32] + ); + }, + WpType::I64, + module_info, + 8, + )?; + } + Operator::I64Load8U { memarg } => { + Self::emit_memory_load( + assembler, + &mut self.value_stack, + |assembler, reg| { + dynasm!( + assembler + ; movzx Rq(reg as u8), BYTE [Rq(reg as u8) + memarg.offset as i32] + ); + }, + WpType::I64, + module_info, + 1, + )?; + } + Operator::I64Load8S { memarg } => { + Self::emit_memory_load( + assembler, + &mut self.value_stack, + |assembler, reg| { + dynasm!( + assembler + ; movsx Rq(reg as u8), BYTE [Rq(reg as u8) + memarg.offset as i32] + ); + }, + WpType::I64, + module_info, + 1, + )?; + } + Operator::I64Load16U { memarg } => { + Self::emit_memory_load( + assembler, + &mut self.value_stack, + |assembler, reg| { + dynasm!( + assembler + ; movzx Rq(reg as u8), WORD [Rq(reg as u8) + memarg.offset as i32] + ); + }, + WpType::I64, + module_info, + 2, + )?; + } + Operator::I64Load16S { memarg } => { + Self::emit_memory_load( + assembler, + &mut self.value_stack, + |assembler, reg| { + dynasm!( + assembler + ; movsx Rq(reg as u8), WORD [Rq(reg as u8) + memarg.offset as i32] + ); + }, + WpType::I64, + module_info, + 2, + )?; + } + Operator::I64Load32U { memarg } => { + Self::emit_memory_load( + assembler, + &mut self.value_stack, + |assembler, reg| { + dynasm!( + assembler + ; mov Rd(reg as u8), DWORD 
[Rq(reg as u8) + memarg.offset as i32] + ); + }, + WpType::I64, + module_info, + 4, + )?; + } + Operator::I64Load32S { memarg } => { + Self::emit_memory_load( + assembler, + &mut self.value_stack, + |assembler, reg| { + dynasm!( + assembler + ; movsx Rq(reg as u8), DWORD [Rq(reg as u8) + memarg.offset as i32] + ); + }, + WpType::I64, + module_info, + 4, + )?; + } + Operator::I64Store { memarg } => { + Self::emit_memory_store( + assembler, + &mut self.value_stack, + |assembler, addr_reg, value_reg| { + dynasm!( + assembler + ; mov [Rq(addr_reg as u8) + memarg.offset as i32], Rq(value_reg as u8) + ); + }, + WpType::I64, + module_info, + 8, + )?; + } + Operator::I64Store8 { memarg } => { + Self::emit_memory_store( + assembler, + &mut self.value_stack, + |assembler, addr_reg, value_reg| { + dynasm!( + assembler + ; mov [Rq(addr_reg as u8) + memarg.offset as i32], Rb(value_reg as u8) + ); + }, + WpType::I64, + module_info, + 1, + )?; + } + Operator::I64Store16 { memarg } => { + Self::emit_memory_store( + assembler, + &mut self.value_stack, + |assembler, addr_reg, value_reg| { + dynasm!( + assembler + ; mov [Rq(addr_reg as u8) + memarg.offset as i32], Rw(value_reg as u8) + ); + }, + WpType::I64, + module_info, + 2, + )?; + } + Operator::I64Store32 { memarg } => { + Self::emit_memory_store( + assembler, + &mut self.value_stack, + |assembler, addr_reg, value_reg| { + dynasm!( + assembler + ; mov [Rq(addr_reg as u8) + memarg.offset as i32], Rd(value_reg as u8) + ); + }, + WpType::I64, + module_info, + 4, + )?; + } + Operator::F32Const { value } => { + let location = self.value_stack.push(WpType::F32); + match location { + ValueLocation::Register(x) => { + let reg = Register::from_scratch_reg(x); + dynasm!( + assembler + ; mov Rd(reg as u8), value.bits() as i32 + ); + } + ValueLocation::Stack => { + dynasm!( + assembler + ; push value.bits() as i32 + ); + } + } + } + Operator::F64Const { value } => { + let location = self.value_stack.push(WpType::F64); + match location { + 
ValueLocation::Register(x) => { + let reg = Register::from_scratch_reg(x); + dynasm!( + assembler + ; mov Rq(reg as u8), QWORD value.bits() as i64 + ); + } + ValueLocation::Stack => { + dynasm!( + assembler + ; mov rax, QWORD value.bits() as i64 + ; push rax + ); + } + } + } + Operator::F32Load { memarg } => { + Self::emit_memory_load( + assembler, + &mut self.value_stack, + |assembler, reg| { + dynasm!( + assembler + ; mov Rd(reg as u8), [Rq(reg as u8) + memarg.offset as i32] + ); + }, + WpType::F32, + module_info, + 4, + )?; + } + Operator::F32Store { memarg } => { + Self::emit_memory_store( + assembler, + &mut self.value_stack, + |assembler, addr_reg, value_reg| { + dynasm!( + assembler + ; mov [Rq(addr_reg as u8) + memarg.offset as i32], Rd(value_reg as u8) + ); + }, + WpType::F32, + module_info, + 4, + )?; + } + Operator::F64Load { memarg } => { + Self::emit_memory_load( + assembler, + &mut self.value_stack, + |assembler, reg| { + dynasm!( + assembler + ; mov Rq(reg as u8), [Rq(reg as u8) + memarg.offset as i32] + ); + }, + WpType::F64, + module_info, + 8, + )?; + } + Operator::F64Store { memarg } => { + Self::emit_memory_store( + assembler, + &mut self.value_stack, + |assembler, addr_reg, value_reg| { + dynasm!( + assembler + ; mov [Rq(addr_reg as u8) + memarg.offset as i32], Rq(value_reg as u8) + ); + }, + WpType::F64, + module_info, + 8, + )?; + } + Operator::I32ReinterpretF32 => { + Self::emit_reinterpret(&mut self.value_stack, WpType::F32, WpType::I32)?; + } + Operator::F32ReinterpretI32 => { + Self::emit_reinterpret(&mut self.value_stack, WpType::I32, WpType::F32)?; + } + Operator::I64ReinterpretF64 => { + Self::emit_reinterpret(&mut self.value_stack, WpType::F64, WpType::I64)?; + } + Operator::F64ReinterpretI64 => { + Self::emit_reinterpret(&mut self.value_stack, WpType::I64, WpType::F64)?; + } + Operator::F32ConvertSI32 => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, reg| { + dynasm!( + assembler + ; cvtsi2ss 
xmm1, Rd(reg as u8) + ; movd Rd(reg as u8), xmm1 + ); + }, + WpType::I32, + WpType::F32, + )?; + } + Operator::F32ConvertUI32 => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, reg| { + dynasm!( + assembler + ; mov Rd(reg as u8), Rd(reg as u8) // clear upper 32 bits + ; cvtsi2ss xmm1, Rq(reg as u8) + ; movd Rd(reg as u8), xmm1 + ); + }, + WpType::I32, + WpType::F32, + )?; + } + Operator::F32ConvertSI64 => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, reg| { + dynasm!( + assembler + ; cvtsi2ss xmm1, Rq(reg as u8) + ; movd Rd(reg as u8), xmm1 + ); + }, + WpType::I64, + WpType::F32, + )?; + } + /* + 0: 48 85 ff test %rdi,%rdi + 3: 78 0b js 10 + 5: c4 e1 fb 2a c7 vcvtsi2sd %rdi,%xmm0,%xmm0 + a: c3 retq + b: 0f 1f 44 00 00 nopl 0x0(%rax,%rax,1) + 10: 48 89 f8 mov %rdi,%rax + 13: 83 e7 01 and $0x1,%edi + 16: 48 d1 e8 shr %rax + 19: 48 09 f8 or %rdi,%rax + 1c: c4 e1 fb 2a c0 vcvtsi2sd %rax,%xmm0,%xmm0 + 21: c5 fb 58 c0 vaddsd %xmm0,%xmm0,%xmm0 + 25: c3 retq + */ + Operator::F32ConvertUI64 => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, reg| { + dynasm!( + assembler + ; test Rq(reg as u8), Rq(reg as u8) + ; js >do_convert + ; cvtsi2ss xmm1, Rq(reg as u8) + ; movd Rd(reg as u8), xmm1 + ; jmp >end_convert + ; do_convert: + ; movq xmm5, r15 + ; mov r15, Rq(reg as u8) + ; and r15, 1 + ; shr Rq(reg as u8), 1 + ; or Rq(reg as u8), r15 + ; cvtsi2ss xmm1, Rq(reg as u8) + ; addss xmm1, xmm1 + ; movq r15, xmm5 + ; movd Rd(reg as u8), xmm1 + ; end_convert: + ); + }, + WpType::I64, + WpType::F32, + )?; + } + Operator::F64ConvertSI32 => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, reg| { + dynasm!( + assembler + ; cvtsi2sd xmm1, Rd(reg as u8) + ; movq Rq(reg as u8), xmm1 + ); + }, + WpType::I32, + WpType::F64, + )?; + } + Operator::F64ConvertUI32 => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, 
_value_stack, reg| { + dynasm!( + assembler + ; mov Rd(reg as u8), Rd(reg as u8) // clear upper 32 bits + ; cvtsi2sd xmm1, Rq(reg as u8) + ; movq Rq(reg as u8), xmm1 + ); + }, + WpType::I32, + WpType::F64, + )?; + } + Operator::F64ConvertSI64 => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, reg| { + dynasm!( + assembler + ; cvtsi2sd xmm1, Rq(reg as u8) + ; movq Rq(reg as u8), xmm1 + ); + }, + WpType::I64, + WpType::F64, + )?; + } + Operator::F64ConvertUI64 => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, reg| { + dynasm!( + assembler + ; test Rq(reg as u8), Rq(reg as u8) + ; js >do_convert + ; cvtsi2sd xmm1, Rq(reg as u8) + ; movq Rq(reg as u8), xmm1 + ; jmp >end_convert + ; do_convert: + ; movq xmm5, r15 + ; mov r15, Rq(reg as u8) + ; and r15, 1 + ; shr Rq(reg as u8), 1 + ; or Rq(reg as u8), r15 + ; cvtsi2sd xmm1, Rq(reg as u8) + ; addsd xmm1, xmm1 + ; movq r15, xmm5 + ; movq Rq(reg as u8), xmm1 + ; end_convert: + ); + }, + WpType::I64, + WpType::F64, + )?; + } + Operator::F64PromoteF32 => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, reg| { + dynasm!( + assembler + ; movd xmm1, Rd(reg as u8) + ; cvtss2sd xmm1, xmm1 + ; movq Rq(reg as u8), xmm1 + ); + }, + WpType::F32, + WpType::F64, + )?; + } + Operator::F32DemoteF64 => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, reg| { + dynasm!( + assembler + ; movq xmm1, Rq(reg as u8) + ; cvtsd2ss xmm1, xmm1 + ; movd Rd(reg as u8), xmm1 + ); + }, + WpType::F64, + WpType::F32, + )?; + } + Operator::F32Add => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + dynasm!( + assembler + ; movd xmm1, Rd(left as u8) + ; movd xmm2, Rd(right as u8) + ; addss xmm1, xmm2 + ; movd Rd(left as u8), xmm1 + ); + }, + WpType::F32, + WpType::F32, + )?; + } + Operator::F32Sub => { + Self::emit_binop( + assembler, + &mut 
self.value_stack, + |assembler, _value_stack, left, right| { + dynasm!( + assembler + ; movd xmm1, Rd(left as u8) + ; movd xmm2, Rd(right as u8) + ; subss xmm1, xmm2 + ; movd Rd(left as u8), xmm1 + ); + }, + WpType::F32, + WpType::F32, + )?; + } + Operator::F32Mul => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + dynasm!( + assembler + ; movd xmm1, Rd(left as u8) + ; movd xmm2, Rd(right as u8) + ; mulss xmm1, xmm2 + ; movd Rd(left as u8), xmm1 + ); + }, + WpType::F32, + WpType::F32, + )?; + } + Operator::F32Div => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + dynasm!( + assembler + ; movd xmm1, Rd(left as u8) + ; movd xmm2, Rd(right as u8) + ; divss xmm1, xmm2 + ; movd Rd(left as u8), xmm1 + ); + }, + WpType::F32, + WpType::F32, + )?; + } + Operator::F32Max => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + dynasm!( + assembler + ; movd xmm1, Rd(left as u8) + ; movd xmm2, Rd(right as u8) + ; maxss xmm1, xmm2 + ; movd Rd(left as u8), xmm1 + ); + }, + WpType::F32, + WpType::F32, + )?; + } + Operator::F32Min => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + dynasm!( + assembler + ; movd xmm1, Rd(left as u8) + ; movd xmm2, Rd(right as u8) + ; minss xmm1, xmm2 + ; movd Rd(left as u8), xmm1 + ); + }, + WpType::F32, + WpType::F32, + )?; + } + Operator::F32Eq => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + dynasm!( + assembler + ; movd xmm1, Rd(left as u8) + ; movd xmm2, Rd(right as u8) + ; cmpeqss xmm1, xmm2 + ; movd Rd(left as u8), xmm1 + ; and Rd(left as u8), 1 + ); + }, + WpType::F32, + WpType::I32, + )?; + } + Operator::F32Ne => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + dynasm!( + assembler + ; movd xmm1, Rd(left as u8) 
+ ; movd xmm2, Rd(right as u8) + ; cmpneqss xmm1, xmm2 + ; movd Rd(left as u8), xmm1 + ; and Rd(left as u8), 1 + ); + }, + WpType::F32, + WpType::I32, + )?; + } + Operator::F32Gt => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + dynasm!( + assembler + ; movd xmm1, Rd(left as u8) + ; movd xmm2, Rd(right as u8) + ; vcmpgtss xmm1, xmm1, xmm2 + ; movd Rd(left as u8), xmm1 + ; and Rd(left as u8), 1 + ); + }, + WpType::F32, + WpType::I32, + )?; + } + Operator::F32Ge => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + dynasm!( + assembler + ; movd xmm1, Rd(left as u8) + ; movd xmm2, Rd(right as u8) + ; vcmpgess xmm1, xmm1, xmm2 + ; movd Rd(left as u8), xmm1 + ; and Rd(left as u8), 1 + ); + }, + WpType::F32, + WpType::I32, + )?; + } + Operator::F32Lt => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + dynasm!( + assembler + ; movd xmm1, Rd(left as u8) + ; movd xmm2, Rd(right as u8) + ; cmpltss xmm1, xmm2 + ; movd Rd(left as u8), xmm1 + ; and Rd(left as u8), 1 + ); + }, + WpType::F32, + WpType::I32, + )?; + } + Operator::F32Le => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + dynasm!( + assembler + ; movd xmm1, Rd(left as u8) + ; movd xmm2, Rd(right as u8) + ; cmpless xmm1, xmm2 + ; movd Rd(left as u8), xmm1 + ; and Rd(left as u8), 1 + ); + }, + WpType::F32, + WpType::I32, + )?; + } + Operator::F32Copysign => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + dynasm!( + assembler + ; movd xmm1, Rd(left as u8) + ; movd xmm2, Rd(right as u8) + ; mov eax, 0x7fffffffu32 as i32 + ; movd xmm3, eax + ; pand xmm1, xmm3 + ; mov eax, 0x80000000u32 as i32 + ; movd xmm3, eax + ; pand xmm2, xmm3 + ; por xmm1, xmm2 + ; movd Rd(left as u8), xmm1 + ); + }, + WpType::F32, + WpType::F32, + )?; + } + 
Operator::F32Sqrt => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, reg| { + dynasm!( + assembler + ; movd xmm1, Rd(reg as u8) + ; sqrtss xmm1, xmm1 + ; movd Rd(reg as u8), xmm1 + ); + }, + WpType::F32, + WpType::F32, + )?; + } + Operator::F32Abs => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, reg| { + dynasm!( + assembler + ; and Rd(reg as u8), 0x7fffffffu32 as i32 + ); + }, + WpType::F32, + WpType::F32, + )?; + } + Operator::F32Neg => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, reg| { + dynasm!( + assembler + ; btc Rd(reg as u8), 31 + ); + }, + WpType::F32, + WpType::F32, + )?; + } + Operator::F32Nearest => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, reg| { + dynasm!( + assembler + ; movd xmm1, Rd(reg as u8) + ; roundss xmm1, xmm1, 0 + ; movd Rd(reg as u8), xmm1 + ); + }, + WpType::F32, + WpType::F32, + )?; + } + Operator::F32Floor => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, reg| { + dynasm!( + assembler + ; movd xmm1, Rd(reg as u8) + ; roundss xmm1, xmm1, 1 + ; movd Rd(reg as u8), xmm1 + ); + }, + WpType::F32, + WpType::F32, + )?; + } + Operator::F32Ceil => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, reg| { + dynasm!( + assembler + ; movd xmm1, Rd(reg as u8) + ; roundss xmm1, xmm1, 2 + ; movd Rd(reg as u8), xmm1 + ); + }, + WpType::F32, + WpType::F32, + )?; + } + Operator::F32Trunc => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, reg| { + dynasm!( + assembler + ; movd xmm1, Rd(reg as u8) + ; roundss xmm1, xmm1, 3 + ; movd Rd(reg as u8), xmm1 + ); + }, + WpType::F32, + WpType::F32, + )?; + } + Operator::I32TruncUF32 => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, reg| { + Self::emit_f32_int_conv_check(assembler, reg, -1.0, 
4294967296.0); + dynasm!( + assembler + ; movd xmm1, Rd(reg as u8) + ; cvttss2si Rq(reg as u8), xmm1 + ; mov Rd(reg as u8), Rd(reg as u8) + ); + }, + WpType::F32, + WpType::I32, + )?; + } + Operator::I32TruncSF32 => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, reg| { + Self::emit_f32_int_conv_check(assembler, reg, -2147483904.0, 2147483648.0); + dynasm!( + assembler + ; movd xmm1, Rd(reg as u8) + ; cvttss2si Rd(reg as u8), xmm1 + ); + }, + WpType::F32, + WpType::I32, + )?; + } + Operator::I64TruncUF32 => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, reg| { + Self::emit_f32_int_conv_check(assembler, reg, -1.0, 18446744073709551616.0); + /* + LCPI0_0: + .long 1593835520 ## float 9.22337203E+18 + + movss LCPI0_0(%rip), %xmm1 ## xmm1 = mem[0],zero,zero,zero + movaps %xmm0, %xmm2 + subss %xmm1, %xmm2 + cvttss2si %xmm2, %rax + movabsq $-9223372036854775808, %rcx ## imm = 0x8000000000000000 + xorq %rax, %rcx + cvttss2si %xmm0, %rax + ucomiss %xmm1, %xmm0 + cmovaeq %rcx, %rax + */ + dynasm!( + assembler + ; movq xmm5, r15 + ; mov r15d, 1593835520u32 as i32 //float 9.22337203E+18 + ; movd xmm1, r15d + ; movd xmm2, Rd(reg as u8) + ; movd xmm3, Rd(reg as u8) + ; subss xmm2, xmm1 + ; cvttss2si Rq(reg as u8), xmm2 + ; mov r15, QWORD 0x8000000000000000u64 as i64 + ; xor r15, Rq(reg as u8) + ; cvttss2si Rq(reg as u8), xmm3 + ; ucomiss xmm3, xmm1 + ; cmovae Rq(reg as u8), r15 + ; movq r15, xmm5 + ); + }, + WpType::F32, + WpType::I64, + )?; + } + Operator::I64TruncSF32 => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, reg| { + Self::emit_f32_int_conv_check( + assembler, + reg, + -9223373136366403584.0, + 9223372036854775808.0, + ); + dynasm!( + assembler + ; movd xmm1, Rd(reg as u8) + ; cvttss2si Rq(reg as u8), xmm1 + ); + }, + WpType::F32, + WpType::I64, + )?; + } + Operator::F64Add => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, 
_value_stack, left, right| { + dynasm!( + assembler + ; movq xmm1, Rq(left as u8) + ; movq xmm2, Rq(right as u8) + ; addsd xmm1, xmm2 + ; movq Rq(left as u8), xmm1 + ); + }, + WpType::F64, + WpType::F64, + )?; + } + Operator::F64Sub => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + dynasm!( + assembler + ; movq xmm1, Rq(left as u8) + ; movq xmm2, Rq(right as u8) + ; subsd xmm1, xmm2 + ; movq Rq(left as u8), xmm1 + ); + }, + WpType::F64, + WpType::F64, + )?; + } + Operator::F64Mul => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + dynasm!( + assembler + ; movq xmm1, Rq(left as u8) + ; movq xmm2, Rq(right as u8) + ; mulsd xmm1, xmm2 + ; movq Rq(left as u8), xmm1 + ); + }, + WpType::F64, + WpType::F64, + )?; + } + Operator::F64Div => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + dynasm!( + assembler + ; movq xmm1, Rq(left as u8) + ; movq xmm2, Rq(right as u8) + ; divsd xmm1, xmm2 + ; movq Rq(left as u8), xmm1 + ); + }, + WpType::F64, + WpType::F64, + )?; + } + Operator::F64Max => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + dynasm!( + assembler + ; movq xmm1, Rq(left as u8) + ; movq xmm2, Rq(right as u8) + ; maxsd xmm1, xmm2 + ; movq Rq(left as u8), xmm1 + ); + }, + WpType::F64, + WpType::F64, + )?; + } + Operator::F64Min => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + dynasm!( + assembler + ; movq xmm1, Rq(left as u8) + ; movq xmm2, Rq(right as u8) + ; minsd xmm1, xmm2 + ; movq Rq(left as u8), xmm1 + ); + }, + WpType::F64, + WpType::F64, + )?; + } + Operator::F64Eq => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + dynasm!( + assembler + ; movq xmm1, Rq(left as u8) + ; movq xmm2, Rq(right as u8) + ; cmpeqsd xmm1, xmm2 + ; 
movd Rd(left as u8), xmm1 + ; and Rd(left as u8), 1 + ); + }, + WpType::F64, + WpType::I32, + )?; + } + Operator::F64Ne => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + dynasm!( + assembler + ; movq xmm1, Rq(left as u8) + ; movq xmm2, Rq(right as u8) + ; cmpneqsd xmm1, xmm2 + ; movd Rd(left as u8), xmm1 + ; and Rd(left as u8), 1 + ); + }, + WpType::F64, + WpType::I32, + )?; + } + Operator::F64Gt => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + dynasm!( + assembler + ; movq xmm1, Rq(left as u8) + ; movq xmm2, Rq(right as u8) + ; vcmpgtsd xmm1, xmm1, xmm2 + ; movd Rd(left as u8), xmm1 + ; and Rd(left as u8), 1 + ); + }, + WpType::F64, + WpType::I32, + )?; + } + Operator::F64Ge => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + dynasm!( + assembler + ; movq xmm1, Rq(left as u8) + ; movq xmm2, Rq(right as u8) + ; vcmpgesd xmm1, xmm1, xmm2 + ; movd Rd(left as u8), xmm1 + ; and Rd(left as u8), 1 + ); + }, + WpType::F64, + WpType::I32, + )?; + } + Operator::F64Lt => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + dynasm!( + assembler + ; movq xmm1, Rq(left as u8) + ; movq xmm2, Rq(right as u8) + ; cmpltsd xmm1, xmm2 + ; movd Rd(left as u8), xmm1 + ; and Rd(left as u8), 1 + ); + }, + WpType::F64, + WpType::I32, + )?; + } + Operator::F64Le => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + dynasm!( + assembler + ; movq xmm1, Rq(left as u8) + ; movq xmm2, Rq(right as u8) + ; cmplesd xmm1, xmm2 + ; movd Rd(left as u8), xmm1 + ; and Rd(left as u8), 1 + ); + }, + WpType::F64, + WpType::I32, + )?; + } + Operator::F64Copysign => { + Self::emit_binop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, left, right| { + dynasm!( + assembler + ; movq xmm1, Rq(left as u8) + ; movq xmm2, 
Rq(right as u8) + ; mov rax, QWORD 0x7fffffffffffffffu64 as i64 + ; movq xmm3, rax + ; pand xmm1, xmm3 + ; mov rax, QWORD 0x8000000000000000u64 as i64 + ; movq xmm3, rax + ; pand xmm2, xmm3 + ; por xmm1, xmm2 + ; movq Rq(left as u8), xmm1 + ); + }, + WpType::F64, + WpType::F64, + )?; + } + Operator::F64Sqrt => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, reg| { + dynasm!( + assembler + ; movq xmm1, Rq(reg as u8) + ; sqrtsd xmm1, xmm1 + ; movq Rq(reg as u8), xmm1 + ); + }, + WpType::F64, + WpType::F64, + )?; + } + Operator::F64Abs => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, reg| { + dynasm!( + assembler + ; movq xmm1, Rq(reg as u8) + ; mov rax, QWORD 0x7fffffffffffffff + ; movq xmm2, rax + ; pand xmm1, xmm2 + ; movq Rq(reg as u8), xmm1 + ); + }, + WpType::F64, + WpType::F64, + )?; + } + Operator::F64Neg => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, reg| { + dynasm!( + assembler + ; btc Rq(reg as u8), 63 + ); + }, + WpType::F64, + WpType::F64, + )?; + } + Operator::F64Nearest => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, reg| { + dynasm!( + assembler + ; movq xmm1, Rq(reg as u8) + ; roundsd xmm1, xmm1, 0 + ; movq Rq(reg as u8), xmm1 + ); + }, + WpType::F64, + WpType::F64, + )?; + } + Operator::F64Floor => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, reg| { + dynasm!( + assembler + ; movq xmm1, Rq(reg as u8) + ; roundsd xmm1, xmm1, 1 + ; movq Rq(reg as u8), xmm1 + ); + }, + WpType::F64, + WpType::F64, + )?; + } + Operator::F64Ceil => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, reg| { + dynasm!( + assembler + ; movq xmm1, Rq(reg as u8) + ; roundsd xmm1, xmm1, 2 + ; movq Rq(reg as u8), xmm1 + ); + }, + WpType::F64, + WpType::F64, + )?; + } + Operator::F64Trunc => { + Self::emit_unop( + assembler, + &mut 
self.value_stack, + |assembler, _value_stack, reg| { + dynasm!( + assembler + ; movq xmm1, Rq(reg as u8) + ; roundsd xmm1, xmm1, 3 + ; movq Rq(reg as u8), xmm1 + ); + }, + WpType::F64, + WpType::F64, + )?; + } + Operator::I32TruncUF64 => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, reg| { + Self::emit_f64_int_conv_check(assembler, reg, -1.0, 4294967296.0); + + dynasm!( + assembler + ; movq xmm1, Rq(reg as u8) + ; cvttsd2si Rq(reg as u8), xmm1 + ; mov Rd(reg as u8), Rd(reg as u8) + ); + }, + WpType::F64, + WpType::I32, + )?; + } + Operator::I32TruncSF64 => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, reg| { + Self::emit_f64_int_conv_check(assembler, reg, -2147483649.0, 2147483648.0); + + dynasm!( + assembler + ; movq xmm1, Rq(reg as u8) + ; cvttsd2si Rd(reg as u8), xmm1 + ); + }, + WpType::F64, + WpType::I32, + )?; + } + Operator::I64TruncUF64 => { + Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, reg| { + Self::emit_f64_int_conv_check(assembler, reg, -1.0, 18446744073709551616.0); + + /* + LCPI0_0: + .quad 4890909195324358656 ## double 9.2233720368547758E+18 + + movsd LCPI0_0(%rip), %xmm1 ## xmm1 = mem[0],zero + movapd %xmm0, %xmm2 + subsd %xmm1, %xmm2 + cvttsd2si %xmm2, %rax + movabsq $-9223372036854775808, %rcx ## imm = 0x8000000000000000 + xorq %rax, %rcx + cvttsd2si %xmm0, %rax + ucomisd %xmm1, %xmm0 + cmovaeq %rcx, %rax + */ + + dynasm!( + assembler + ; movq xmm5, r15 + ; mov r15, QWORD 4890909195324358656u64 as i64 //double 9.2233720368547758E+18 + ; movq xmm1, r15 + ; movq xmm2, Rq(reg as u8) + ; movq xmm3, Rq(reg as u8) + ; subsd xmm2, xmm1 + ; cvttsd2si Rq(reg as u8), xmm2 + ; mov r15, QWORD 0x8000000000000000u64 as i64 + ; xor r15, Rq(reg as u8) + ; cvttsd2si Rq(reg as u8), xmm3 + ; ucomisd xmm3, xmm1 + ; cmovae Rq(reg as u8), r15 + ; movq r15, xmm5 + ); + }, + WpType::F64, + WpType::I64, + )?; + } + Operator::I64TruncSF64 => { + 
Self::emit_unop( + assembler, + &mut self.value_stack, + |assembler, _value_stack, reg| { + Self::emit_f64_int_conv_check( + assembler, + reg, + -9223372036854777856.0, + 9223372036854775808.0, + ); + + dynasm!( + assembler + ; movq xmm1, Rq(reg as u8) + ; cvttsd2si Rq(reg as u8), xmm1 + ); + }, + WpType::F64, + WpType::I64, + )?; + } + Operator::Nop => {} + Operator::MemorySize { reserved } => { + let memory_index = MemoryIndex::new(reserved as usize); + let label = match memory_index.local_or_import(module_info) { + LocalOrImport::Local(local_mem_index) => { + let mem_desc = &module_info.memories[local_mem_index]; + match mem_desc.memory_type() { + MemoryType::Dynamic => { + self.native_trampolines.memory_size_dynamic_local + } + MemoryType::Static => self.native_trampolines.memory_size_static_local, + MemoryType::SharedStatic => { + self.native_trampolines.memory_size_shared_local + } + } + } + LocalOrImport::Import(import_mem_index) => { + let mem_desc = &module_info.imported_memories[import_mem_index].1; + match mem_desc.memory_type() { + MemoryType::Dynamic => { + self.native_trampolines.memory_size_dynamic_import + } + MemoryType::Static => self.native_trampolines.memory_size_static_import, + MemoryType::SharedStatic => { + self.native_trampolines.memory_size_shared_import + } + } + } + }; + Self::emit_call_raw(assembler, &mut self.value_stack, label, &[], &[WpType::I32])?; + } + Operator::MemoryGrow { reserved } => { + let memory_index = MemoryIndex::new(reserved as usize); + let label = match memory_index.local_or_import(module_info) { + LocalOrImport::Local(local_mem_index) => { + let mem_desc = &module_info.memories[local_mem_index]; + match mem_desc.memory_type() { + MemoryType::Dynamic => { + self.native_trampolines.memory_grow_dynamic_local + } + MemoryType::Static => self.native_trampolines.memory_grow_static_local, + MemoryType::SharedStatic => { + self.native_trampolines.memory_grow_shared_local + } + } + } + LocalOrImport::Import(import_mem_index) 
=> { + let mem_desc = &module_info.imported_memories[import_mem_index].1; + match mem_desc.memory_type() { + MemoryType::Dynamic => { + self.native_trampolines.memory_grow_dynamic_import + } + MemoryType::Static => self.native_trampolines.memory_grow_static_import, + MemoryType::SharedStatic => { + self.native_trampolines.memory_grow_shared_import + } + } + } + }; + Self::emit_call_raw( + assembler, + &mut self.value_stack, + label, + &[WpType::I32], + &[WpType::I32], + )?; + Self::emit_update_memory_from_ctx(assembler, module_info)?; + } + _ => { + panic!("{:?}", op); + } + } + Ok(()) + } + + fn finalize(&mut self) -> Result<(), CodegenError> { + let assembler = self.assembler.as_mut().unwrap(); + + dynasm!( + assembler + ; mov rsp, rbp + ; pop rbp + ; ret + ); + + if self.value_stack.values.len() != 0 + || self.control_stack.as_ref().unwrap().frames.len() != 0 + { + return Err(CodegenError { + message: "control/value stack not empty at end of function", + }); + } + + Ok(()) + } +} + +fn get_size_of_type(ty: &WpType) -> Result { + match *ty { + WpType::I32 | WpType::F32 => Ok(4), + WpType::I64 | WpType::F64 => Ok(8), + _ => Err(CodegenError { + message: "unknown type", + }), + } +} + +fn is_dword(n: usize) -> bool { + n == 4 +} + +fn type_to_wp_type(ty: Type) -> WpType { + match ty { + Type::I32 => WpType::I32, + Type::I64 => WpType::I64, + Type::F32 => WpType::F32, + Type::F64 => WpType::F64, + } +} + +unsafe extern "C" fn invoke_import( + _unused: usize, + import_id: usize, + stack_top: *mut u8, + stack_base: *mut u8, + _vmctx: *mut vm::Ctx, + _memory_base: *mut u8, +) -> u64 { + let vmctx: &mut vm::InternalCtx = &mut *(_vmctx as *mut vm::InternalCtx); + let import = (*vmctx.imported_funcs.offset(import_id as isize)).func; + + CONSTRUCT_STACK_AND_CALL_NATIVE(stack_top, stack_base, _vmctx, import) +} + +#[repr(u64)] +#[derive(Copy, Clone, Debug)] +enum CallIndirectLocalOrImport { + Local, + Import, +} + +#[allow(clippy::cast_ptr_alignment)] +unsafe extern "C" fn 
call_indirect( + sig_index: usize, + local_or_import: CallIndirectLocalOrImport, + mut stack_top: *mut u8, + stack_base: *mut u8, + vmctx: *mut vm::Ctx, + _memory_base: *mut u8, +) -> u64 { + let elem_index = *(stack_top as *mut u32) as usize; + stack_top = stack_top.offset(8); + assert!(stack_top as usize <= stack_base as usize); + + let table: &LocalTable = match local_or_import { + CallIndirectLocalOrImport::Local => &*(*(*(vmctx as *mut vm::InternalCtx)).tables), + CallIndirectLocalOrImport::Import => { + &*(*(*(vmctx as *mut vm::InternalCtx)).imported_tables) + } + }; + if elem_index >= table.count as usize { + eprintln!("element index out of bounds"); + protect_unix::trigger_trap(); + } + let anyfunc = &*(table.base as *mut vm::Anyfunc).offset(elem_index as isize); + let dynamic_sigindex = *(*(vmctx as *mut vm::InternalCtx)) + .dynamic_sigindices + .offset(sig_index as isize); + + if anyfunc.func.is_null() { + eprintln!("null anyfunc"); + protect_unix::trigger_trap(); + } + + if anyfunc.sig_id.0 != dynamic_sigindex.0 { + eprintln!("signature mismatch"); + protect_unix::trigger_trap(); + } + + CONSTRUCT_STACK_AND_CALL_NATIVE(stack_top, stack_base, anyfunc.ctx, anyfunc.func) +} + +#[repr(u64)] +#[derive(Copy, Clone, Debug)] +enum MemoryKind { + DynamicLocal, + StaticLocal, + SharedLocal, + DynamicImport, + StaticImport, + SharedImport, +} + +unsafe extern "C" fn _memory_size( + op: MemoryKind, + index: usize, + _stack_top: *mut u8, + _stack_base: *mut u8, + vmctx: *mut vm::Ctx, + _memory_base: *mut u8, +) -> u64 { + use wasmer_runtime_core::vmcalls; + let ret = match op { + MemoryKind::DynamicLocal => { + vmcalls::local_dynamic_memory_size(&*vmctx, LocalMemoryIndex::new(index)) + } + MemoryKind::StaticLocal => { + vmcalls::local_static_memory_size(&*vmctx, LocalMemoryIndex::new(index)) + } + MemoryKind::SharedLocal => unreachable!(), + MemoryKind::DynamicImport => { + vmcalls::imported_dynamic_memory_size(&*vmctx, ImportedMemoryIndex::new(index)) + } + 
MemoryKind::StaticImport => { + vmcalls::imported_static_memory_size(&*vmctx, ImportedMemoryIndex::new(index)) + } + MemoryKind::SharedImport => unreachable!(), + }; + ret.0 as u32 as u64 +} + +#[allow(clippy::cast_ptr_alignment)] +unsafe extern "C" fn _memory_grow( + op: MemoryKind, + index: usize, + stack_top: *mut u8, + stack_base: *mut u8, + vmctx: *mut vm::Ctx, + _memory_base: *mut u8, +) -> u64 { + use wasmer_runtime_core::vmcalls; + assert_eq!(stack_base as usize - stack_top as usize, 8); + let pages = Pages(*(stack_top as *mut u32)); + let ret = match op { + MemoryKind::DynamicLocal => { + vmcalls::local_dynamic_memory_grow(&mut *vmctx, LocalMemoryIndex::new(index), pages) + } + MemoryKind::StaticLocal => { + vmcalls::local_static_memory_grow(&mut *vmctx, LocalMemoryIndex::new(index), pages) + } + MemoryKind::SharedLocal => unreachable!(), + MemoryKind::DynamicImport => vmcalls::imported_dynamic_memory_grow( + &mut *vmctx, + ImportedMemoryIndex::new(index), + pages, + ), + MemoryKind::StaticImport => vmcalls::imported_static_memory_grow( + &mut *vmctx, + ImportedMemoryIndex::new(index), + pages, + ), + MemoryKind::SharedImport => unreachable!(), + }; + ret as u32 as u64 +} diff --git a/lib/dynasm-backend/src/lib.rs b/lib/dynasm-backend/src/lib.rs new file mode 100644 index 00000000000..1647611bb3e --- /dev/null +++ b/lib/dynasm-backend/src/lib.rs @@ -0,0 +1,87 @@ +#![feature(proc_macro_hygiene)] + +#[cfg(not(any( + all(target_os = "macos", target_arch = "x86_64"), + all(target_os = "linux", target_arch = "x86_64"), +)))] +compile_error!("This crate doesn't yet support compiling on operating systems other than linux and macos and architectures other than x86_64"); + +extern crate dynasmrt; + +#[macro_use] +extern crate dynasm; + +#[macro_use] +extern crate lazy_static; + +extern crate byteorder; + +mod codegen; +mod codegen_x64; +mod parse; +mod protect_unix; +mod stack; + +use crate::codegen::{CodegenError, ModuleCodeGenerator}; +use 
crate::parse::LoadError; +use wasmer_runtime_core::{ + backend::{sys::Memory, Backend, CacheGen, Compiler, Token}, + cache::{Artifact, Error as CacheError}, + error::{CompileError, CompileResult}, + module::{ModuleInfo, ModuleInner}, +}; + +struct Placeholder; +impl CacheGen for Placeholder { + fn generate_cache( + &self, + _module: &ModuleInner, + ) -> Result<(Box, Box<[u8]>, Memory), CacheError> { + Err(CacheError::Unknown( + "the dynasm backend doesn't support caching yet".to_string(), + )) + } +} + +pub struct SinglePassCompiler {} +impl SinglePassCompiler { + pub fn new() -> Self { + Self {} + } +} + +impl Compiler for SinglePassCompiler { + fn compile(&self, wasm: &[u8], _: Token) -> CompileResult { + let mut mcg = codegen_x64::X64ModuleCodeGenerator::new(); + let info = parse::read_module(wasm, Backend::Dynasm, &mut mcg)?; + let (ec, resolver) = mcg.finalize(&info)?; + Ok(ModuleInner { + cache_gen: Box::new(Placeholder), + func_resolver: Box::new(resolver), + protected_caller: Box::new(ec), + info: info, + }) + } + + unsafe fn from_cache(&self, _artifact: Artifact, _: Token) -> Result { + Err(CacheError::Unknown( + "the dynasm backend doesn't support caching yet".to_string(), + )) + } +} + +impl From for CompileError { + fn from(other: CodegenError) -> CompileError { + CompileError::InternalError { + msg: other.message.into(), + } + } +} + +impl From for CompileError { + fn from(other: LoadError) -> CompileError { + CompileError::InternalError { + msg: format!("{:?}", other), + } + } +} diff --git a/lib/dynasm-backend/src/parse.rs b/lib/dynasm-backend/src/parse.rs new file mode 100644 index 00000000000..7e918e9426e --- /dev/null +++ b/lib/dynasm-backend/src/parse.rs @@ -0,0 +1,441 @@ +use crate::codegen::{CodegenError, FunctionCodeGenerator, ModuleCodeGenerator}; +use wasmer_runtime_core::{ + backend::{Backend, FuncResolver, ProtectedCaller}, + module::{ + DataInitializer, ExportIndex, ImportName, ModuleInfo, StringTable, StringTableBuilder, + 
TableInitializer, + }, + structures::{Map, TypedIndex}, + types::{ + ElementType, FuncIndex, FuncSig, GlobalDescriptor, GlobalIndex, GlobalInit, + ImportedGlobalIndex, Initializer, MemoryDescriptor, MemoryIndex, SigIndex, TableDescriptor, + TableIndex, Type, Value, + }, + units::Pages, +}; +use wasmparser::{ + BinaryReaderError, Data, DataKind, Element, ElementKind, Export, ExternalKind, FuncType, + Import, ImportSectionEntryType, InitExpr, ModuleReader, Operator, SectionCode, Type as WpType, + WasmDecoder, +}; + +#[derive(Debug)] +pub enum LoadError { + Parse(BinaryReaderError), + Codegen(CodegenError), +} + +impl From for LoadError { + fn from(other: BinaryReaderError) -> LoadError { + LoadError::Parse(other) + } +} + +impl From for LoadError { + fn from(other: CodegenError) -> LoadError { + LoadError::Codegen(other) + } +} + +fn validate(bytes: &[u8]) -> Result<(), LoadError> { + let mut parser = wasmparser::ValidatingParser::new( + bytes, + Some(wasmparser::ValidatingParserConfig { + operator_config: wasmparser::OperatorValidatorConfig { + enable_threads: false, + enable_reference_types: false, + enable_simd: false, + enable_bulk_memory: false, + }, + mutable_global_imports: false, + }), + ); + + loop { + let state = parser.read(); + match *state { + wasmparser::ParserState::EndWasm => break Ok(()), + wasmparser::ParserState::Error(err) => Err(LoadError::Parse(err))?, + _ => {} + } + } +} + +pub fn read_module< + MCG: ModuleCodeGenerator, + FCG: FunctionCodeGenerator, + PC: ProtectedCaller, + FR: FuncResolver, +>( + wasm: &[u8], + backend: Backend, + mcg: &mut MCG, +) -> Result { + validate(wasm)?; + let mut info = ModuleInfo { + memories: Map::new(), + globals: Map::new(), + tables: Map::new(), + + imported_functions: Map::new(), + imported_memories: Map::new(), + imported_tables: Map::new(), + imported_globals: Map::new(), + + exports: Default::default(), + + data_initializers: Vec::new(), + elem_initializers: Vec::new(), + + start_func: None, + + func_assoc: 
Map::new(), + signatures: Map::new(), + backend: backend, + + namespace_table: StringTable::new(), + name_table: StringTable::new(), + }; + + let mut reader = ModuleReader::new(wasm)?; + + loop { + if reader.eof() { + return Ok(info); + } + + let section = reader.read()?; + + match section.code { + SectionCode::Type => { + let type_reader = section.get_type_section_reader()?; + + for ty in type_reader { + let ty = ty?; + info.signatures.push(func_type_to_func_sig(ty)?); + } + + mcg.feed_signatures(info.signatures.clone())?; + } + SectionCode::Import => { + let import_reader = section.get_import_section_reader()?; + let mut namespace_builder = StringTableBuilder::new(); + let mut name_builder = StringTableBuilder::new(); + + for import in import_reader { + let Import { module, field, ty } = import?; + + let namespace_index = namespace_builder.register(module); + let name_index = name_builder.register(field); + let import_name = ImportName { + namespace_index, + name_index, + }; + + match ty { + ImportSectionEntryType::Function(sigindex) => { + let sigindex = SigIndex::new(sigindex as usize); + info.imported_functions.push(import_name); + info.func_assoc.push(sigindex); + mcg.feed_import_function()?; + } + ImportSectionEntryType::Table(table_ty) => { + assert_eq!(table_ty.element_type, WpType::AnyFunc); + let table_desc = TableDescriptor { + element: ElementType::Anyfunc, + minimum: table_ty.limits.initial, + maximum: table_ty.limits.maximum, + }; + + info.imported_tables.push((import_name, table_desc)); + } + ImportSectionEntryType::Memory(memory_ty) => { + let mem_desc = MemoryDescriptor { + minimum: Pages(memory_ty.limits.initial), + maximum: memory_ty.limits.maximum.map(|max| Pages(max)), + shared: memory_ty.shared, + }; + info.imported_memories.push((import_name, mem_desc)); + } + ImportSectionEntryType::Global(global_ty) => { + let global_desc = GlobalDescriptor { + mutable: global_ty.mutable, + ty: wp_type_to_type(global_ty.content_type)?, + }; + 
info.imported_globals.push((import_name, global_desc)); + } + } + } + + info.namespace_table = namespace_builder.finish(); + info.name_table = name_builder.finish(); + } + SectionCode::Function => { + let func_decl_reader = section.get_function_section_reader()?; + + for sigindex in func_decl_reader { + let sigindex = sigindex?; + + let sigindex = SigIndex::new(sigindex as usize); + info.func_assoc.push(sigindex); + } + + mcg.feed_function_signatures(info.func_assoc.clone())?; + } + SectionCode::Table => { + let table_decl_reader = section.get_table_section_reader()?; + + for table_ty in table_decl_reader { + let table_ty = table_ty?; + + let table_desc = TableDescriptor { + element: ElementType::Anyfunc, + minimum: table_ty.limits.initial, + maximum: table_ty.limits.maximum, + }; + + info.tables.push(table_desc); + } + } + SectionCode::Memory => { + let mem_decl_reader = section.get_memory_section_reader()?; + + for memory_ty in mem_decl_reader { + let memory_ty = memory_ty?; + + let mem_desc = MemoryDescriptor { + minimum: Pages(memory_ty.limits.initial), + maximum: memory_ty.limits.maximum.map(|max| Pages(max)), + shared: memory_ty.shared, + }; + + info.memories.push(mem_desc); + } + } + SectionCode::Global => { + let global_decl_reader = section.get_global_section_reader()?; + + for global in global_decl_reader { + let global = global?; + + let desc = GlobalDescriptor { + mutable: global.ty.mutable, + ty: wp_type_to_type(global.ty.content_type)?, + }; + + let global_init = GlobalInit { + desc, + init: eval_init_expr(&global.init_expr)?, + }; + + info.globals.push(global_init); + } + } + SectionCode::Export => { + let export_reader = section.get_export_section_reader()?; + + for export in export_reader { + let Export { field, kind, index } = export?; + + let export_index = match kind { + ExternalKind::Function => ExportIndex::Func(FuncIndex::new(index as usize)), + ExternalKind::Table => ExportIndex::Table(TableIndex::new(index as usize)), + ExternalKind::Memory 
=> { + ExportIndex::Memory(MemoryIndex::new(index as usize)) + } + ExternalKind::Global => { + ExportIndex::Global(GlobalIndex::new(index as usize)) + } + }; + + info.exports.insert(field.to_string(), export_index); + } + } + SectionCode::Start => { + let start_index = section.get_start_section_content()?; + + info.start_func = Some(FuncIndex::new(start_index as usize)); + } + SectionCode::Element => { + let element_reader = section.get_element_section_reader()?; + + for element in element_reader { + let Element { kind, items } = element?; + + match kind { + ElementKind::Active { + table_index, + init_expr, + } => { + let table_index = TableIndex::new(table_index as usize); + let base = eval_init_expr(&init_expr)?; + let items_reader = items.get_items_reader()?; + + let elements: Vec<_> = items_reader + .into_iter() + .map(|res| res.map(|index| FuncIndex::new(index as usize))) + .collect::>()?; + + let table_init = TableInitializer { + table_index, + base, + elements, + }; + + info.elem_initializers.push(table_init); + } + ElementKind::Passive(_ty) => { + return Err(BinaryReaderError { + message: "passive tables are not yet supported", + offset: -1isize as usize, + } + .into()); + } + } + } + } + SectionCode::Code => { + let mut code_reader = section.get_code_section_reader()?; + if code_reader.get_count() as usize > info.func_assoc.len() { + return Err(BinaryReaderError { + message: "code_reader.get_count() > info.func_assoc.len()", + offset: ::std::usize::MAX, + } + .into()); + } + mcg.check_precondition(&info)?; + for i in 0..code_reader.get_count() { + let item = code_reader.read()?; + let fcg = mcg.next_function()?; + let sig = info + .signatures + .get( + *info + .func_assoc + .get(FuncIndex::new(i as usize + info.imported_functions.len())) + .unwrap(), + ) + .unwrap(); + for ret in sig.returns() { + fcg.feed_return(type_to_wp_type(*ret))?; + } + for param in sig.params() { + fcg.feed_param(type_to_wp_type(*param))?; + } + for local in 
item.get_locals_reader()? { + let (count, ty) = local?; + fcg.feed_local(ty, count as usize)?; + } + fcg.begin_body()?; + for op in item.get_operators_reader()? { + let op = op?; + fcg.feed_opcode(op, &info)?; + } + fcg.finalize()?; + } + } + SectionCode::Data => { + let data_reader = section.get_data_section_reader()?; + + for data in data_reader { + let Data { kind, data } = data?; + + match kind { + DataKind::Active { + memory_index, + init_expr, + } => { + let memory_index = MemoryIndex::new(memory_index as usize); + let base = eval_init_expr(&init_expr)?; + + let data_init = DataInitializer { + memory_index, + base, + data: data.to_vec(), + }; + + info.data_initializers.push(data_init); + } + DataKind::Passive => { + return Err(BinaryReaderError { + message: "passive memories are not yet supported", + offset: -1isize as usize, + } + .into()); + } + } + } + } + SectionCode::DataCount => {} + SectionCode::Custom { .. } => {} + } + } +} + +pub fn wp_type_to_type(ty: WpType) -> Result { + Ok(match ty { + WpType::I32 => Type::I32, + WpType::I64 => Type::I64, + WpType::F32 => Type::F32, + WpType::F64 => Type::F64, + WpType::V128 => { + return Err(BinaryReaderError { + message: "the wasmer llvm backend does not yet support the simd extension", + offset: -1isize as usize, + }); + } + _ => panic!("broken invariant, invalid type"), + }) +} + +pub fn type_to_wp_type(ty: Type) -> WpType { + match ty { + Type::I32 => WpType::I32, + Type::I64 => WpType::I64, + Type::F32 => WpType::F32, + Type::F64 => WpType::F64, + } +} + +fn func_type_to_func_sig(func_ty: FuncType) -> Result { + assert_eq!(func_ty.form, WpType::Func); + + Ok(FuncSig::new( + func_ty + .params + .iter() + .cloned() + .map(wp_type_to_type) + .collect::, _>>()?, + func_ty + .returns + .iter() + .cloned() + .map(wp_type_to_type) + .collect::, _>>()?, + )) +} + +fn eval_init_expr(expr: &InitExpr) -> Result { + let mut reader = expr.get_operators_reader(); + let (op, offset) = reader.read_with_offset()?; + 
Ok(match op { + Operator::GetGlobal { global_index } => { + Initializer::GetGlobal(ImportedGlobalIndex::new(global_index as usize)) + } + Operator::I32Const { value } => Initializer::Const(Value::I32(value)), + Operator::I64Const { value } => Initializer::Const(Value::I64(value)), + Operator::F32Const { value } => { + Initializer::Const(Value::F32(f32::from_bits(value.bits()))) + } + Operator::F64Const { value } => { + Initializer::Const(Value::F64(f64::from_bits(value.bits()))) + } + _ => { + return Err(BinaryReaderError { + message: "init expr evaluation failed: unsupported opcode", + offset, + }); + } + }) +} diff --git a/lib/dynasm-backend/src/protect_unix.rs b/lib/dynasm-backend/src/protect_unix.rs new file mode 100644 index 00000000000..4daf633aa71 --- /dev/null +++ b/lib/dynasm-backend/src/protect_unix.rs @@ -0,0 +1,202 @@ +//! Installing signal handlers allows us to handle traps and out-of-bounds memory +//! accesses that occur when running webassembly. +//! +//! This code is inspired by: https://github.com/pepyakin/wasmtime/commit/625a2b6c0815b21996e111da51b9664feb174622 +//! +//! When a WebAssembly module triggers any traps, we perform recovery here. +//! +//! This module uses TLS (thread-local storage) to track recovery information. Since the four signals we're handling +//! are very special, the async signal unsafety of Rust's TLS implementation generally does not affect the correctness here +//! unless you have memory unsafety elsewhere in your code. +//! 
+use libc::{c_int, c_void, siginfo_t}; +use nix::sys::signal::{ + sigaction, SaFlags, SigAction, SigHandler, SigSet, Signal, SIGBUS, SIGFPE, SIGILL, SIGSEGV, +}; +use std::cell::{Cell, UnsafeCell}; +use std::ptr; +use std::sync::Once; +use wasmer_runtime_core::error::{RuntimeError, RuntimeResult}; + +extern "C" fn signal_trap_handler( + signum: ::nix::libc::c_int, + siginfo: *mut siginfo_t, + ucontext: *mut c_void, +) { + unsafe { + do_unwind(signum, siginfo as _, ucontext); + } +} + +extern "C" { + pub fn setjmp(env: *mut c_void) -> c_int; + fn longjmp(env: *mut c_void, val: c_int) -> !; +} + +pub unsafe fn install_sighandler() { + let sa = SigAction::new( + SigHandler::SigAction(signal_trap_handler), + SaFlags::SA_ONSTACK, + SigSet::empty(), + ); + sigaction(SIGFPE, &sa).unwrap(); + sigaction(SIGILL, &sa).unwrap(); + sigaction(SIGSEGV, &sa).unwrap(); + sigaction(SIGBUS, &sa).unwrap(); +} + +const SETJMP_BUFFER_LEN: usize = 27; +pub static SIGHANDLER_INIT: Once = Once::new(); + +thread_local! { + pub static SETJMP_BUFFER: UnsafeCell<[c_int; SETJMP_BUFFER_LEN]> = UnsafeCell::new([0; SETJMP_BUFFER_LEN]); + pub static CAUGHT_ADDRESSES: Cell<(*const c_void, *const c_void)> = Cell::new((ptr::null(), ptr::null())); + pub static CURRENT_EXECUTABLE_BUFFER: Cell<*const c_void> = Cell::new(ptr::null()); +} + +pub unsafe fn trigger_trap() -> ! 
{ + let jmp_buf = SETJMP_BUFFER.with(|buf| buf.get()); + + longjmp(jmp_buf as *mut c_void, 0) +} + +pub fn call_protected(f: impl FnOnce() -> T) -> RuntimeResult { + unsafe { + let jmp_buf = SETJMP_BUFFER.with(|buf| buf.get()); + let prev_jmp_buf = *jmp_buf; + + SIGHANDLER_INIT.call_once(|| { + install_sighandler(); + }); + + let signum = setjmp(jmp_buf as *mut _); + if signum != 0 { + *jmp_buf = prev_jmp_buf; + + let (faulting_addr, _inst_ptr) = CAUGHT_ADDRESSES.with(|cell| cell.get()); + + let signal = match Signal::from_c_int(signum) { + Ok(SIGFPE) => "floating-point exception", + Ok(SIGILL) => "illegal instruction", + Ok(SIGSEGV) => "segmentation violation", + Ok(SIGBUS) => "bus error", + Err(_) => "error while getting the Signal", + _ => "unkown trapped signal", + }; + // When the trap-handler is fully implemented, this will return more information. + Err(RuntimeError::Trap { + msg: format!("unknown trap at {:p} - {}", faulting_addr, signal).into(), + } + .into()) + } else { + let ret = f(); // TODO: Switch stack? + *jmp_buf = prev_jmp_buf; + Ok(ret) + } + } +} + +/// Unwinds to last protected_call. +pub unsafe fn do_unwind(signum: i32, siginfo: *const c_void, ucontext: *const c_void) -> ! { + // Since do_unwind is only expected to get called from WebAssembly code which doesn't hold any host resources (locks etc.) + // itself, accessing TLS here is safe. In case any other code calls this, it often indicates a memory safety bug and you should + // temporarily disable the signal handlers to debug it. 
+ + let jmp_buf = SETJMP_BUFFER.with(|buf| buf.get()); + if *jmp_buf == [0; SETJMP_BUFFER_LEN] { + ::std::process::abort(); + } + + CAUGHT_ADDRESSES.with(|cell| cell.set(get_faulting_addr_and_ip(siginfo, ucontext))); + + longjmp(jmp_buf as *mut ::nix::libc::c_void, signum) +} + +#[cfg(all(target_os = "linux", target_arch = "x86_64"))] +unsafe fn get_faulting_addr_and_ip( + siginfo: *const c_void, + ucontext: *const c_void, +) -> (*const c_void, *const c_void) { + use libc::{ucontext_t, RIP}; + + #[allow(dead_code)] + #[repr(C)] + struct siginfo_t { + si_signo: i32, + si_errno: i32, + si_code: i32, + si_addr: u64, + // ... + } + + let siginfo = siginfo as *const siginfo_t; + let si_addr = (*siginfo).si_addr; + + let ucontext = ucontext as *const ucontext_t; + let rip = (*ucontext).uc_mcontext.gregs[RIP as usize]; + + (si_addr as _, rip as _) +} + +#[cfg(all(target_os = "macos", target_arch = "x86_64"))] +unsafe fn get_faulting_addr_and_ip( + siginfo: *const c_void, + ucontext: *const c_void, +) -> (*const c_void, *const c_void) { + #[allow(dead_code)] + #[repr(C)] + struct ucontext_t { + uc_onstack: u32, + uc_sigmask: u32, + uc_stack: libc::stack_t, + uc_link: *const ucontext_t, + uc_mcsize: u64, + uc_mcontext: *const mcontext_t, + } + #[repr(C)] + struct exception_state { + trapno: u16, + cpu: u16, + err: u32, + faultvaddr: u64, + } + #[repr(C)] + struct regs { + rax: u64, + rbx: u64, + rcx: u64, + rdx: u64, + rdi: u64, + rsi: u64, + rbp: u64, + rsp: u64, + r8: u64, + r9: u64, + r10: u64, + r11: u64, + r12: u64, + r13: u64, + r14: u64, + r15: u64, + rip: u64, + rflags: u64, + cs: u64, + fs: u64, + gs: u64, + } + #[allow(dead_code)] + #[repr(C)] + struct mcontext_t { + es: exception_state, + ss: regs, + // ... 
+ } + + let siginfo = siginfo as *const siginfo_t; + let si_addr = (*siginfo).si_addr; + + let ucontext = ucontext as *const ucontext_t; + let rip = (*(*ucontext).uc_mcontext).ss.rip; + + (si_addr, rip as _) +} diff --git a/lib/dynasm-backend/src/stack.rs b/lib/dynasm-backend/src/stack.rs new file mode 100644 index 00000000000..d237b05b1a4 --- /dev/null +++ b/lib/dynasm-backend/src/stack.rs @@ -0,0 +1,164 @@ +use crate::codegen::CodegenError; +use dynasmrt::DynamicLabel; +use wasmparser::Type as WpType; + +/*#[repr(u8)] +#[derive(Copy, Clone, Debug)] +pub enum RegisterName { + RDI, + RSI, + RDX, + RCX, + R8, + R9, + R10, + R11, + RBX, + R12, + R13, + R14, + R15, + Invalid, +}*/ + +#[derive(Debug, Copy, Clone)] +pub enum IfElseState { + None, + If(DynamicLabel), + Else, +} + +#[derive(Debug)] +pub struct ControlFrame { + pub label: DynamicLabel, + pub loop_like: bool, + pub if_else: IfElseState, + pub returns: Vec, + pub value_stack_depth_before: usize, +} + +#[derive(Debug)] +pub struct ControlStack { + pub frames: Vec, +} + +#[derive(Debug)] +pub struct ValueStack { + pub num_regs: u8, + pub values: Vec, +} + +#[derive(Copy, Clone, Debug)] +pub struct ValueInfo { + pub ty: WpType, + pub location: ValueLocation, +} + +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +pub enum ValueLocation { + Register(ScratchRegister), + Stack, +} + +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +pub struct ScratchRegister(u8); + +impl ScratchRegister { + pub fn raw_id(&self) -> u8 { + self.0 + } +} + +impl ValueLocation { + pub fn is_register(&self) -> bool { + if let ValueLocation::Register(_) = *self { + true + } else { + false + } + } + + pub fn get_register(&self) -> Result { + if let ValueLocation::Register(id) = *self { + Ok(id) + } else { + Err(CodegenError { + message: "not a register location", + }) + } + } +} + +impl ValueStack { + pub fn new(num_regs: u8) -> ValueStack { + ValueStack { + num_regs: num_regs, + values: vec![], + } + } + + fn next_location(&self, loc: 
&ValueLocation) -> ValueLocation { + match *loc { + ValueLocation::Register(ScratchRegister(x)) => { + if x >= self.num_regs - 1 { + ValueLocation::Stack + } else { + ValueLocation::Register(ScratchRegister(x + 1)) + } + } + ValueLocation::Stack => ValueLocation::Stack, + } + } + + pub fn push(&mut self, ty: WpType) -> ValueLocation { + let loc = self + .values + .last() + .map(|x| self.next_location(&x.location)) + .unwrap_or(ValueLocation::Register(ScratchRegister(0))); + self.values.push(ValueInfo { + ty: ty, + location: loc, + }); + loc + } + + pub fn pop(&mut self) -> Result { + match self.values.pop() { + Some(x) => Ok(x), + None => Err(CodegenError { + message: "no value on top of stack", + }), + } + } + + pub fn pop2(&mut self) -> Result<(ValueInfo, ValueInfo), CodegenError> { + if self.values.len() < 2 { + Err(CodegenError { + message: "less than 2 values on top of stack", + }) + } else { + let v2 = self.values.pop().unwrap(); + let v1 = self.values.pop().unwrap(); + Ok((v1, v2)) + } + } + + pub fn reset_depth(&mut self, target_depth: usize) { + self.values.truncate(target_depth); + } +} + +impl ControlStack { + pub fn new(label: DynamicLabel, returns: Vec) -> ControlStack { + ControlStack { + frames: vec![ControlFrame { + label: label, + loop_like: false, + if_else: IfElseState::None, + returns: returns, + value_stack_depth_before: 0, + }], + } + } +} diff --git a/lib/emscripten/Cargo.toml b/lib/emscripten/Cargo.toml index cd691e56652..2823b1c4f3e 100644 --- a/lib/emscripten/Cargo.toml +++ b/lib/emscripten/Cargo.toml @@ -14,20 +14,20 @@ lazy_static = "1.2.0" libc = "0.2.49" byteorder = "1" time = "0.1.41" +wasmer-clif-backend = { path = "../clif-backend", version = "0.2.0" } +wasmer-dynasm-backend = { path = "../dynasm-backend", version = "0.1.0", optional = true } +wasmer-llvm-backend = { path = "../llvm-backend", version = "0.1.0", optional = true } [target.'cfg(windows)'.dependencies] rand = "0.6" [dev-dependencies] -wasmer-clif-backend = { path = 
"../clif-backend", version = "0.2.0" } wabt = "0.7.2" -[target.'cfg(not(windows))'.dev-dependencies] -wasmer-llvm-backend = { path = "../llvm-backend", version = "0.1.0" } - [build-dependencies] glob = "0.2.11" [features] clif = [] -llvm = [] \ No newline at end of file +llvm = ["wasmer-llvm-backend"] +dynasm = ["wasmer-dynasm-backend"] \ No newline at end of file diff --git a/lib/emscripten/src/utils.rs b/lib/emscripten/src/utils.rs index e7c2ae1408b..5dcfd4e1157 100644 --- a/lib/emscripten/src/utils.rs +++ b/lib/emscripten/src/utils.rs @@ -186,7 +186,13 @@ mod tests { LLVMCompiler::new() } - #[cfg(not(any(feature = "llvm", feature = "clif")))] + #[cfg(feature = "dynasm")] + fn get_compiler() -> impl Compiler { + use wasmer_dynasm_backend::SinglePassCompiler; + SinglePassCompiler::new() + } + + #[cfg(not(any(feature = "llvm", feature = "clif", feature = "dynasm")))] fn get_compiler() -> impl Compiler { panic!("compiler not specified, activate a compiler via features"); use wasmer_clif_backend::CraneliftCompiler; diff --git a/lib/emscripten/tests/emtests/_common.rs b/lib/emscripten/tests/emtests/_common.rs index e7a42b356b9..1289fe741d2 100644 --- a/lib/emscripten/tests/emtests/_common.rs +++ b/lib/emscripten/tests/emtests/_common.rs @@ -20,7 +20,13 @@ macro_rules! 
assert_emscripten_output { LLVMCompiler::new() } - #[cfg(not(any(feature = "llvm", feature = "clif")))] + #[cfg(feature = "dynasm")] + fn get_compiler() -> impl Compiler { + use wasmer_dynasm_backend::SinglePassCompiler; + SinglePassCompiler::new() + } + + #[cfg(not(any(feature = "llvm", feature = "clif", feature = "dynasm")))] fn get_compiler() -> impl Compiler { panic!("compiler not specified, activate a compiler via features"); use wasmer_clif_backend::CraneliftCompiler; diff --git a/lib/runtime-core/src/backend.rs b/lib/runtime-core/src/backend.rs index 47f0fbae922..a09909ebe32 100644 --- a/lib/runtime-core/src/backend.rs +++ b/lib/runtime-core/src/backend.rs @@ -22,6 +22,7 @@ pub use crate::sig_registry::SigRegistry; #[derive(Serialize, Deserialize, Debug, Copy, Clone, PartialEq, Eq)] pub enum Backend { Cranelift, + Dynasm, LLVM, } diff --git a/lib/runtime-core/src/error.rs b/lib/runtime-core/src/error.rs index 9e99c2833b1..61d5ac3a227 100644 --- a/lib/runtime-core/src/error.rs +++ b/lib/runtime-core/src/error.rs @@ -1,6 +1,10 @@ -use crate::types::{FuncSig, GlobalDescriptor, MemoryDescriptor, TableDescriptor, Type, Value}; +use crate::types::{ + FuncSig, GlobalDescriptor, MemoryDescriptor, MemoryIndex, TableDescriptor, TableIndex, Type, + Value, +}; use core::borrow::Borrow; use std::any::Any; +use std::sync::Arc; pub type Result = std::result::Result; pub type CompileResult = std::result::Result; diff --git a/lib/runtime-core/src/types.rs b/lib/runtime-core/src/types.rs index 9ee5270dd73..c69d0edb02b 100644 --- a/lib/runtime-core/src/types.rs +++ b/lib/runtime-core/src/types.rs @@ -1,4 +1,8 @@ -use crate::{memory::MemoryType, module::ModuleInfo, structures::TypedIndex, units::Pages}; +use crate::error::{CompileError, CompileResult}; +use crate::{ + memory::MemoryType, module::ModuleInfo, module::ModuleInner, structures::TypedIndex, + units::Pages, +}; use std::{borrow::Cow, mem}; /// Represents a WebAssembly type. 
diff --git a/lib/runtime-core/src/vm.rs b/lib/runtime-core/src/vm.rs index 508ca031b8e..8d2721bdd6a 100644 --- a/lib/runtime-core/src/vm.rs +++ b/lib/runtime-core/src/vm.rs @@ -13,41 +13,52 @@ use std::{ffi::c_void, mem, ptr}; #[derive(Debug)] #[repr(C)] pub struct Ctx { + // `internal` must be the first field of `Ctx`. + pub(crate) internal: InternalCtx, + + pub(crate) local_functions: *const *const Func, + + local_backing: *mut LocalBacking, + import_backing: *mut ImportBacking, + module: *const ModuleInner, + + pub data: *mut c_void, + pub data_finalizer: Option, +} + +/// The internal context of the currently running WebAssembly instance. +/// +/// +#[doc(hidden)] +#[derive(Debug)] +#[repr(C)] +pub struct InternalCtx { /// A pointer to an array of locally-defined memories, indexed by `MemoryIndex`. - pub(crate) memories: *mut *mut LocalMemory, + pub memories: *mut *mut LocalMemory, /// A pointer to an array of locally-defined tables, indexed by `TableIndex`. - pub(crate) tables: *mut *mut LocalTable, + pub tables: *mut *mut LocalTable, /// A pointer to an array of locally-defined globals, indexed by `GlobalIndex`. - pub(crate) globals: *mut *mut LocalGlobal, + pub globals: *mut *mut LocalGlobal, /// A pointer to an array of imported memories, indexed by `MemoryIndex, - pub(crate) imported_memories: *mut *mut LocalMemory, + pub imported_memories: *mut *mut LocalMemory, /// A pointer to an array of imported tables, indexed by `TableIndex`. - pub(crate) imported_tables: *mut *mut LocalTable, + pub imported_tables: *mut *mut LocalTable, /// A pointer to an array of imported globals, indexed by `GlobalIndex`. - pub(crate) imported_globals: *mut *mut LocalGlobal, + pub imported_globals: *mut *mut LocalGlobal, /// A pointer to an array of imported functions, indexed by `FuncIndex`. - pub(crate) imported_funcs: *mut ImportedFunc, + pub imported_funcs: *mut ImportedFunc, /// A pointer to an array of signature ids. 
Conceptually, this maps /// from a static, module-local signature id to a runtime-global /// signature id. This is used to allow call-indirect to other /// modules safely. - pub(crate) dynamic_sigindices: *const SigId, - - pub(crate) local_functions: *const *const Func, - - local_backing: *mut LocalBacking, - import_backing: *mut ImportBacking, - module: *const ModuleInner, - - pub data: *mut c_void, - pub data_finalizer: Option, + pub dynamic_sigindices: *const SigId, } impl Ctx { @@ -58,16 +69,18 @@ impl Ctx { module: &ModuleInner, ) -> Self { Self { - memories: local_backing.vm_memories.as_mut_ptr(), - tables: local_backing.vm_tables.as_mut_ptr(), - globals: local_backing.vm_globals.as_mut_ptr(), + internal: InternalCtx { + memories: local_backing.vm_memories.as_mut_ptr(), + tables: local_backing.vm_tables.as_mut_ptr(), + globals: local_backing.vm_globals.as_mut_ptr(), - imported_memories: import_backing.vm_memories.as_mut_ptr(), - imported_tables: import_backing.vm_tables.as_mut_ptr(), - imported_globals: import_backing.vm_globals.as_mut_ptr(), - imported_funcs: import_backing.vm_functions.as_mut_ptr(), + imported_memories: import_backing.vm_memories.as_mut_ptr(), + imported_tables: import_backing.vm_tables.as_mut_ptr(), + imported_globals: import_backing.vm_globals.as_mut_ptr(), + imported_funcs: import_backing.vm_functions.as_mut_ptr(), - dynamic_sigindices: local_backing.dynamic_sigindices.as_ptr(), + dynamic_sigindices: local_backing.dynamic_sigindices.as_ptr(), + }, local_functions: local_backing.local_functions.as_ptr(), local_backing, @@ -88,16 +101,18 @@ impl Ctx { data_finalizer: extern "C" fn(*mut c_void), ) -> Self { Self { - memories: local_backing.vm_memories.as_mut_ptr(), - tables: local_backing.vm_tables.as_mut_ptr(), - globals: local_backing.vm_globals.as_mut_ptr(), + internal: InternalCtx { + memories: local_backing.vm_memories.as_mut_ptr(), + tables: local_backing.vm_tables.as_mut_ptr(), + globals: local_backing.vm_globals.as_mut_ptr(), - 
imported_memories: import_backing.vm_memories.as_mut_ptr(), - imported_tables: import_backing.vm_tables.as_mut_ptr(), - imported_globals: import_backing.vm_globals.as_mut_ptr(), - imported_funcs: import_backing.vm_functions.as_mut_ptr(), + imported_memories: import_backing.vm_memories.as_mut_ptr(), + imported_tables: import_backing.vm_tables.as_mut_ptr(), + imported_globals: import_backing.vm_globals.as_mut_ptr(), + imported_funcs: import_backing.vm_functions.as_mut_ptr(), - dynamic_sigindices: local_backing.dynamic_sigindices.as_ptr(), + dynamic_sigindices: local_backing.dynamic_sigindices.as_ptr(), + }, local_functions: local_backing.local_functions.as_ptr(), local_backing, diff --git a/lib/runtime-core/src/vmcalls.rs b/lib/runtime-core/src/vmcalls.rs index b428fb24e11..4126024bf83 100644 --- a/lib/runtime-core/src/vmcalls.rs +++ b/lib/runtime-core/src/vmcalls.rs @@ -17,7 +17,7 @@ pub unsafe extern "C" fn local_static_memory_grow( memory_index: LocalMemoryIndex, delta: Pages, ) -> i32 { - let local_memory = *ctx.memories.add(memory_index.index()); + let local_memory = *ctx.internal.memories.add(memory_index.index()); let memory = (*local_memory).memory as *mut StaticMemory; match (*memory).grow(delta, &mut *local_memory) { @@ -30,7 +30,7 @@ pub unsafe extern "C" fn local_static_memory_size( ctx: &vm::Ctx, memory_index: LocalMemoryIndex, ) -> Pages { - let local_memory = *ctx.memories.add(memory_index.index()); + let local_memory = *ctx.internal.memories.add(memory_index.index()); let memory = (*local_memory).memory as *mut StaticMemory; (*memory).size() @@ -41,7 +41,7 @@ pub unsafe extern "C" fn local_dynamic_memory_grow( memory_index: LocalMemoryIndex, delta: Pages, ) -> i32 { - let local_memory = *ctx.memories.add(memory_index.index()); + let local_memory = *ctx.internal.memories.add(memory_index.index()); let memory = (*local_memory).memory as *mut DynamicMemory; match (*memory).grow(delta, &mut *local_memory) { @@ -54,7 +54,7 @@ pub unsafe extern "C" fn 
local_dynamic_memory_size( ctx: &vm::Ctx, memory_index: LocalMemoryIndex, ) -> Pages { - let local_memory = *ctx.memories.add(memory_index.index()); + let local_memory = *ctx.internal.memories.add(memory_index.index()); let memory = (*local_memory).memory as *mut DynamicMemory; (*memory).size() @@ -69,7 +69,10 @@ pub unsafe extern "C" fn imported_static_memory_grow( import_memory_index: ImportedMemoryIndex, delta: Pages, ) -> i32 { - let local_memory = *ctx.imported_memories.add(import_memory_index.index()); + let local_memory = *ctx + .internal + .imported_memories + .add(import_memory_index.index()); let memory = (*local_memory).memory as *mut StaticMemory; match (*memory).grow(delta, &mut *local_memory) { @@ -82,7 +85,10 @@ pub unsafe extern "C" fn imported_static_memory_size( ctx: &vm::Ctx, import_memory_index: ImportedMemoryIndex, ) -> Pages { - let local_memory = *ctx.imported_memories.add(import_memory_index.index()); + let local_memory = *ctx + .internal + .imported_memories + .add(import_memory_index.index()); let memory = (*local_memory).memory as *mut StaticMemory; (*memory).size() @@ -93,7 +99,7 @@ pub unsafe extern "C" fn imported_dynamic_memory_grow( memory_index: ImportedMemoryIndex, delta: Pages, ) -> i32 { - let local_memory = *ctx.imported_memories.add(memory_index.index()); + let local_memory = *ctx.internal.imported_memories.add(memory_index.index()); let memory = (*local_memory).memory as *mut DynamicMemory; match (*memory).grow(delta, &mut *local_memory) { @@ -106,7 +112,7 @@ pub unsafe extern "C" fn imported_dynamic_memory_size( ctx: &vm::Ctx, memory_index: ImportedMemoryIndex, ) -> Pages { - let local_memory = *ctx.imported_memories.add(memory_index.index()); + let local_memory = *ctx.internal.imported_memories.add(memory_index.index()); let memory = (*local_memory).memory as *mut DynamicMemory; (*memory).size() diff --git a/lib/runtime/Cargo.toml b/lib/runtime/Cargo.toml index 349334b5a12..c137922f063 100644 --- a/lib/runtime/Cargo.toml +++ 
b/lib/runtime/Cargo.toml @@ -9,6 +9,7 @@ edition = "2018" readme = "README.md" [dependencies] +wasmer-dynasm-backend = { path = "../dynasm-backend", optional = true } lazy_static = "1.2.0" memmap = "0.7.0" @@ -19,6 +20,7 @@ version = "0.2.1" [dependencies.wasmer-clif-backend] path = "../clif-backend" version = "0.2.0" +optional = true [dev-dependencies] tempfile = "3.0.7" @@ -30,8 +32,12 @@ path = "../llvm-backend" optional = true [features] +default = ["default-compiler"] +default-compiler = ["wasmer-clif-backend"] +cache = ["default-compiler"] debug = ["wasmer-clif-backend/debug", "wasmer-runtime-core/debug"] llvm = ["wasmer-llvm-backend"] +dynasm = ["wasmer-dynasm-backend"] [[bench]] name = "nginx" diff --git a/lib/runtime/src/lib.rs b/lib/runtime/src/lib.rs index fb8d2d80f34..27945aa103b 100644 --- a/lib/runtime/src/lib.rs +++ b/lib/runtime/src/lib.rs @@ -159,7 +159,10 @@ pub fn default_compiler() -> &'static dyn Compiler { #[cfg(feature = "llvm")] use wasmer_llvm_backend::LLVMCompiler as DefaultCompiler; - #[cfg(not(feature = "llvm"))] + #[cfg(feature = "dynasm")] + use wasmer_dynasm_backend::SinglePassCompiler as DefaultCompiler; + + #[cfg(not(any(feature = "llvm", feature = "dynasm")))] use wasmer_clif_backend::CraneliftCompiler as DefaultCompiler; lazy_static! 
{ diff --git a/lib/spectests/Cargo.toml b/lib/spectests/Cargo.toml index 21211e1b42c..8dd8f83f492 100644 --- a/lib/spectests/Cargo.toml +++ b/lib/spectests/Cargo.toml @@ -10,19 +10,19 @@ build = "build/mod.rs" [dependencies] wasmer-runtime-core = { path = "../runtime-core", version = "0.2.0" } +wasmer-clif-backend = { path = "../clif-backend", version = "0.2.0" } +wasmer-llvm-backend = { path = "../llvm-backend", version = "0.1.0", optional = true } +wasmer-dynasm-backend = { path = "../dynasm-backend", version = "0.1.0", optional = true } [build-dependencies] wabt = "0.7.2" [dev-dependencies] -wasmer-clif-backend = { path = "../clif-backend", version = "0.2.0" } wabt = "0.7.2" -[target.'cfg(not(windows))'.dependencies] -wasmer-llvm-backend = { path = "../llvm-backend", version = "0.1.0", optional = true } - [features] default = ["fast-tests"] fast-tests = [] clif = [] llvm = ["wasmer-llvm-backend"] +dynasm = ["wasmer-dynasm-backend"] \ No newline at end of file diff --git a/lib/spectests/build/spectests.rs b/lib/spectests/build/spectests.rs index c1d28ca2907..a13faefd18a 100644 --- a/lib/spectests/build/spectests.rs +++ b/lib/spectests/build/spectests.rs @@ -107,7 +107,13 @@ fn get_compiler() -> impl Compiler { LLVMCompiler::new() } -#[cfg(not(any(feature = "llvm", feature = "clif")))] +#[cfg(feature = "dynasm")] +fn get_compiler() -> impl Compiler { + use wasmer_dynasm_backend::SinglePassCompiler; + SinglePassCompiler::new() +} + +#[cfg(not(any(feature = "llvm", feature = "clif", feature = "dynasm")))] fn get_compiler() -> impl Compiler { panic!("compiler not specified, activate a compiler via features"); use wasmer_clif_backend::CraneliftCompiler; diff --git a/lib/spectests/examples/simple/main.rs b/lib/spectests/examples/simple/main.rs index 595bb3680f5..c61cdb18fc7 100644 --- a/lib/spectests/examples/simple/main.rs +++ b/lib/spectests/examples/simple/main.rs @@ -22,7 +22,13 @@ fn get_compiler() -> impl Compiler { LLVMCompiler::new() } -#[cfg(not(any(feature = 
"llvm", feature = "clif")))] +#[cfg(feature = "dynasm")] +fn get_compiler() -> impl Compiler { + use wasmer_dynasm_backend::SinglePassCompiler; + SinglePassCompiler::new() +} + +#[cfg(not(any(feature = "llvm", feature = "clif", feature = "dynasm")))] fn get_compiler() -> impl Compiler { panic!("compiler not specified, activate a compiler via features"); use wasmer_clif_backend::CraneliftCompiler; diff --git a/src/bin/wasmer.rs b/src/bin/wasmer.rs index 1af94188749..34064f833d5 100644 --- a/src/bin/wasmer.rs +++ b/src/bin/wasmer.rs @@ -131,8 +131,9 @@ fn execute_wasm(options: &Run) -> Result<(), String> { let module = webassembly::compile(&wasm_binary[..]) .map_err(|e| format!("Can't compile module: {:?}", e))?; - // We save the module into a cache file - cache.store(hash, module.clone()).unwrap(); + // We try to save the module into a cache file + cache.store(hash, module.clone()).unwrap_or_default(); + module } }; diff --git a/src/webassembly.rs b/src/webassembly.rs index bbe50b4d62d..8ae38935b99 100644 --- a/src/webassembly.rs +++ b/src/webassembly.rs @@ -4,6 +4,7 @@ use wasmer_runtime::{ error::{CallResult, Result}, ImportObject, Instance, Module, }; +use wasmer_runtime_core::types::Value; use wasmer_emscripten::{is_emscripten_module, run_emscripten_instance}; @@ -86,7 +87,11 @@ pub fn run_instance( if is_emscripten_module(module) { run_emscripten_instance(module, instance, path, args)?; } else { - instance.call("main", &[])?; + let args: Vec<Value> = args + .into_iter() + .map(|x| Value::I32(x.parse().unwrap())) + .collect(); + instance.call("main", &args)?; }; Ok(())