diff --git a/cranelift/filetests/filetests/wasm/fixed-size-memory.wat b/cranelift/filetests/filetests/wasm/fixed-size-memory.wat new file mode 100644 index 000000000000..ac1de89f39af --- /dev/null +++ b/cranelift/filetests/filetests/wasm/fixed-size-memory.wat @@ -0,0 +1,78 @@ +;;! target = "x86_64" +;;! +;;! settings = ["enable_heap_access_spectre_mitigation=false"] +;;! +;;! compile = false +;;! +;;! [globals.vmctx] +;;! type = "i64" +;;! vmctx = true +;;! +;;! [globals.heap_base] +;;! type = "i64" +;;! load = { base = "vmctx", offset = 0, readonly = true } +;;! +;;! [globals.heap_bound] +;;! type = "i64" +;;! load = { base = "vmctx", offset = 8, readonly = true } +;;! +;;! [[heaps]] +;;! base = "heap_base" +;;! min_size = 0x10000 +;;! max_size = 0x10000 +;;! offset_guard_size = 0 +;;! index_type = "i32" +;;! style = { kind = "dynamic", bound = "heap_bound" } + +;; Test that dynamic memories with `min_size == max_size` don't actually load +;; their dynamic memory bound, since it is a constant. 
+ +(module + (memory 1 1) + + (func (export "do_store") (param i32 i32) + local.get 0 + local.get 1 + i32.store8 offset=0) + + (func (export "do_load") (param i32) (result i32) + local.get 0 + i32.load8_u offset=0)) + +;; function u0:0(i32, i32, i64 vmctx) fast { +;; gv0 = vmctx +;; gv1 = load.i64 notrap aligned readonly gv0+8 +;; gv2 = load.i64 notrap aligned readonly gv0 +;; +;; block0(v0: i32, v1: i32, v2: i64): +;; @0041 v3 = uextend.i64 v0 +;; @0041 v4 = iconst.i64 0x0001_0000 +;; @0041 v5 = icmp uge v3, v4 ; v4 = 0x0001_0000 +;; @0041 trapnz v5, heap_oob +;; @0041 v6 = global_value.i64 gv2 +;; @0041 v7 = iadd v6, v3 +;; @0041 istore8 little heap v1, v7 +;; @0044 jump block1 +;; +;; block1: +;; @0044 return +;; } +;; +;; function u0:1(i32, i64 vmctx) -> i32 fast { +;; gv0 = vmctx +;; gv1 = load.i64 notrap aligned readonly gv0+8 +;; gv2 = load.i64 notrap aligned readonly gv0 +;; +;; block0(v0: i32, v1: i64): +;; @0049 v3 = uextend.i64 v0 +;; @0049 v4 = iconst.i64 0x0001_0000 +;; @0049 v5 = icmp uge v3, v4 ; v4 = 0x0001_0000 +;; @0049 trapnz v5, heap_oob +;; @0049 v6 = global_value.i64 gv2 +;; @0049 v7 = iadd v6, v3 +;; @0049 v8 = uload8.i32 little heap v7 +;; @004c jump block1(v8) +;; +;; block1(v2: i32): +;; @004c return v2 +;; } diff --git a/cranelift/filetests/filetests/wasm/non-fixed-size-memory.wat b/cranelift/filetests/filetests/wasm/non-fixed-size-memory.wat new file mode 100644 index 000000000000..dc981fe6ffd6 --- /dev/null +++ b/cranelift/filetests/filetests/wasm/non-fixed-size-memory.wat @@ -0,0 +1,78 @@ +;;! target = "x86_64" +;;! +;;! settings = ["enable_heap_access_spectre_mitigation=false"] +;;! +;;! compile = false +;;! +;;! [globals.vmctx] +;;! type = "i64" +;;! vmctx = true +;;! +;;! [globals.heap_base] +;;! type = "i64" +;;! load = { base = "vmctx", offset = 0, readonly = true } +;;! +;;! [globals.heap_bound] +;;! type = "i64" +;;! load = { base = "vmctx", offset = 8, readonly = true } +;;! +;;! [[heaps]] +;;! base = "heap_base" +;;! 
min_size = 0x10000 +;;! max_size = 0x20000 +;;! offset_guard_size = 0 +;;! index_type = "i32" +;;! style = { kind = "dynamic", bound = "heap_bound" } + +;; Dual test to `fixed-size-memory.wat` that checks that we _don't_ use a +;; constant for the heap bound when `min_size != max_size`. + +(module + (memory 1 2) + + (func (export "do_store") (param i32 i32) + local.get 0 + local.get 1 + i32.store8 offset=0) + + (func (export "do_load") (param i32) (result i32) + local.get 0 + i32.load8_u offset=0)) + +;; function u0:0(i32, i32, i64 vmctx) fast { +;; gv0 = vmctx +;; gv1 = load.i64 notrap aligned readonly gv0+8 +;; gv2 = load.i64 notrap aligned readonly gv0 +;; +;; block0(v0: i32, v1: i32, v2: i64): +;; @0041 v3 = uextend.i64 v0 +;; @0041 v4 = global_value.i64 gv1 +;; @0041 v5 = icmp uge v3, v4 +;; @0041 trapnz v5, heap_oob +;; @0041 v6 = global_value.i64 gv2 +;; @0041 v7 = iadd v6, v3 +;; @0041 istore8 little heap v1, v7 +;; @0044 jump block1 +;; +;; block1: +;; @0044 return +;; } +;; +;; function u0:1(i32, i64 vmctx) -> i32 fast { +;; gv0 = vmctx +;; gv1 = load.i64 notrap aligned readonly gv0+8 +;; gv2 = load.i64 notrap aligned readonly gv0 +;; +;; block0(v0: i32, v1: i64): +;; @0049 v3 = uextend.i64 v0 +;; @0049 v4 = global_value.i64 gv1 +;; @0049 v5 = icmp uge v3, v4 +;; @0049 trapnz v5, heap_oob +;; @0049 v6 = global_value.i64 gv2 +;; @0049 v7 = iadd v6, v3 +;; @0049 v8 = uload8.i32 little heap v7 +;; @004c jump block1(v8) +;; +;; block1(v2: i32): +;; @004c return v2 +;; } diff --git a/cranelift/filetests/src/test_wasm/config.rs b/cranelift/filetests/src/test_wasm/config.rs index eb9b6f60ec9a..15ca92ec7596 100644 --- a/cranelift/filetests/src/test_wasm/config.rs +++ b/cranelift/filetests/src/test_wasm/config.rs @@ -159,6 +159,9 @@ pub struct TestHeap { #[serde(default)] pub min_size: u64, + #[serde(default)] + pub max_size: Option<u64>, + #[serde(default)] pub offset_guard_size: u64, @@ -174,7 +177,8 @@ impl TestHeap { ) -> cranelift_wasm::HeapData { 
cranelift_wasm::HeapData { base: name_to_ir_global[&self.base], - min_size: self.min_size.into(), + min_size: self.min_size, + max_size: self.max_size, offset_guard_size: self.offset_guard_size.into(), style: self.style.to_ir(name_to_ir_global), index_type: match self.index_type.as_str() { diff --git a/cranelift/wasm/src/code_translator/bounds_checks.rs b/cranelift/wasm/src/code_translator/bounds_checks.rs index 6e236165b031..464417b518cb 100644 --- a/cranelift/wasm/src/code_translator/bounds_checks.rs +++ b/cranelift/wasm/src/code_translator/bounds_checks.rs @@ -85,8 +85,8 @@ where // // index + 1 > bound // ==> index >= bound - HeapStyle::Dynamic { bound_gv } if offset_and_size == 1 => { - let bound = builder.ins().global_value(env.pointer_type(), bound_gv); + HeapStyle::Dynamic { .. } if offset_and_size == 1 => { + let bound = get_dynamic_heap_bound(builder, env, heap); let oob = builder .ins() .icmp(IntCC::UnsignedGreaterThanOrEqual, index, bound); @@ -127,8 +127,8 @@ where // offset immediates -- which is a common code pattern when accessing // multiple fields in the same struct that is in linear memory -- // will all emit the same `index > bound` check, which we can GVN. - HeapStyle::Dynamic { bound_gv } if offset_and_size <= heap.offset_guard_size => { - let bound = builder.ins().global_value(env.pointer_type(), bound_gv); + HeapStyle::Dynamic { .. } if offset_and_size <= heap.offset_guard_size => { + let bound = get_dynamic_heap_bound(builder, env, heap); let oob = builder.ins().icmp(IntCC::UnsignedGreaterThan, index, bound); Reachable(explicit_check_oob_condition_and_compute_addr( &mut builder.cursor(), @@ -149,8 +149,8 @@ where // // index + offset + access_size > bound // ==> index > bound - (offset + access_size) - HeapStyle::Dynamic { bound_gv } if offset_and_size <= heap.min_size.into() => { - let bound = builder.ins().global_value(env.pointer_type(), bound_gv); + HeapStyle::Dynamic { .. 
} if offset_and_size <= heap.min_size.into() => { + let bound = get_dynamic_heap_bound(builder, env, heap); let adjusted_bound = builder.ins().iadd_imm(bound, -(offset_and_size as i64)); let oob = builder .ins() @@ -172,7 +172,7 @@ where // index + offset + access_size > bound // // And we have to handle the overflow case in the left-hand side. - HeapStyle::Dynamic { bound_gv } => { + HeapStyle::Dynamic { .. } => { let access_size_val = builder .ins() .iconst(env.pointer_type(), offset_and_size as i64); @@ -181,7 +181,7 @@ where access_size_val, ir::TrapCode::HeapOutOfBounds, ); - let bound = builder.ins().global_value(env.pointer_type(), bound_gv); + let bound = get_dynamic_heap_bound(builder, env, heap); let oob = builder .ins() .icmp(IntCC::UnsignedGreaterThan, adjusted_index, bound); @@ -297,6 +297,30 @@ where }) } +/// Get the bound of a dynamic heap as an `ir::Value`. +fn get_dynamic_heap_bound<Env>( + builder: &mut FunctionBuilder, + env: &mut Env, + heap: &HeapData, +) -> ir::Value +where + Env: FuncEnvironment + ?Sized, +{ + match (heap.max_size, &heap.style) { + // The heap has a constant size, no need to actually load the bound. + (Some(max_size), _) if heap.min_size == max_size => { + builder.ins().iconst(env.pointer_type(), max_size as i64) + } + + // Load the heap bound from its global variable. + (_, HeapStyle::Dynamic { bound_gv }) => { + builder.ins().global_value(env.pointer_type(), *bound_gv) + } + + (_, HeapStyle::Static { .. 
}) => unreachable!("not a dynamic heap"), + } +} + fn cast_index_to_pointer_ty( index: ir::Value, index_ty: ir::Type, diff --git a/cranelift/wasm/src/environ/dummy.rs b/cranelift/wasm/src/environ/dummy.rs index aacaeb18a4f0..f27ffc7025e0 100644 --- a/cranelift/wasm/src/environ/dummy.rs +++ b/cranelift/wasm/src/environ/dummy.rs @@ -307,6 +307,7 @@ impl<'dummy_environment> FuncEnvironment for DummyFuncEnvironment<'dummy_environ Ok(self.heaps.push(HeapData { base: gv, min_size: 0, + max_size: None, offset_guard_size: 0x8000_0000, style: HeapStyle::Static { bound: 0x1_0000_0000, diff --git a/cranelift/wasm/src/heap.rs b/cranelift/wasm/src/heap.rs index bab1f7e9a292..85d5f1d687e0 100644 --- a/cranelift/wasm/src/heap.rs +++ b/cranelift/wasm/src/heap.rs @@ -74,6 +74,11 @@ pub struct HeapData { /// don't need bounds checking. pub min_size: u64, + /// The maximum heap size in bytes. + /// + /// Heap accesses larger than this will always trap. + pub max_size: Option, + /// Size in bytes of the offset-guard pages following the heap. pub offset_guard_size: u64, diff --git a/crates/cranelift/src/func_environ.rs b/crates/cranelift/src/func_environ.rs index 189de51c21c9..2ea9098f040a 100644 --- a/crates/cranelift/src/func_environ.rs +++ b/crates/cranelift/src/func_environ.rs @@ -1839,6 +1839,11 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m u64::MAX }); + let max_size = self.module.memory_plans[index] + .memory + .maximum + .and_then(|max| max.checked_mul(u64::from(WASM_PAGE_SIZE))); + let (ptr, base_offset, current_length_offset) = { let vmctx = self.vmctx(func); if let Some(def_index) = self.module.defined_memory_index(index) { @@ -1943,6 +1948,7 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m Ok(self.heaps.push(HeapData { base: heap_base, min_size, + max_size, offset_guard_size, style: heap_style, index_type: self.memory_index_type(index),