From 1e05dffbeeb80ba61bf59cc63b222d538d7825ed Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Thu, 10 Jun 2021 14:32:39 -0500 Subject: [PATCH] wasm: Mark simd intrinsics as stable (#1179) --- crates/core_arch/src/lib.rs | 2 +- crates/core_arch/src/mod.rs | 20 +- crates/core_arch/src/wasm32/simd128.rs | 369 ++++++++++++++++++++++++- examples/hex.rs | 1 - 4 files changed, 368 insertions(+), 24 deletions(-) diff --git a/crates/core_arch/src/lib.rs b/crates/core_arch/src/lib.rs index ddf012f635..da5b4713aa 100644 --- a/crates/core_arch/src/lib.rs +++ b/crates/core_arch/src/lib.rs @@ -39,7 +39,7 @@ bench_black_box )] #![cfg_attr(test, feature(test, abi_vectorcall))] -#![cfg_attr(all(test, target_arch = "wasm32"), feature(wasm_simd))] +#![cfg_attr(target_arch = "wasm32", feature(wasm_simd_const))] #![deny(clippy::missing_inline_in_public_items)] #![allow( clippy::inline_always, diff --git a/crates/core_arch/src/mod.rs b/crates/core_arch/src/mod.rs index ede4e5a3fb..7812dc91f9 100644 --- a/crates/core_arch/src/mod.rs +++ b/crates/core_arch/src/mod.rs @@ -64,7 +64,7 @@ pub mod arch { /// proposals such as [atomics] and [simd]. /// /// Intrinsics in the `wasm32` module are modeled after the WebAssembly - /// instructions that they represent. All functions are named after the + /// instructions that they represent. Most functions are named after the /// instruction they intend to correspond to, and the arguments/results /// correspond to the type signature of the instruction itself. Stable /// WebAssembly instructions are [documented online][instrdoc]. @@ -104,19 +104,11 @@ pub mod arch { /// /// ## SIMD /// - /// The [simd proposal][simd] for WebAssembly adds a new `v128` type for a - /// 128-bit SIMD register. It also adds a large array of instructions to - /// operate on the `v128` type to perform data processing. The SIMD proposal - /// at the time of this writing is in [phase 4] which means that it's in the - /// standardization phase. It's expected that once some testing on nightly - /// has happened a stabilization proposal will be made for the Rust - /// intrinsics. If you notice anything awry please feel free to [open an - /// issue](https://github.com/rust-lang/stdarch/issues/new). - /// - /// [phase 4]: https://github.com/webassembly/proposals - /// - /// Using SIMD is intended to be similar to as you would on `x86_64`, for - /// example. You'd write a function such as: + /// The [simd proposal][simd] for WebAssembly added a new `v128` type for a + /// 128-bit SIMD register. It also added a large array of instructions to + /// operate on the `v128` type to perform data processing. Using SIMD on + /// wasm is intended to be similar to using SIMD on `x86_64`, for example. + /// You'd write a function such as: /// /// ```rust,ignore /// #[cfg(target_arch = "wasm32")] diff --git a/crates/core_arch/src/wasm32/simd128.rs b/crates/core_arch/src/wasm32/simd128.rs index 55ab0ad6a3..fb70ce64ad 100644 --- a/crates/core_arch/src/wasm32/simd128.rs +++ b/crates/core_arch/src/wasm32/simd128.rs @@ -3,7 +3,6 @@ //! [WebAssembly `SIMD128` ISA]: //! https://github.com/WebAssembly/simd/blob/master/proposals/simd/SIMD.md -#![unstable(feature = "wasm_simd", issue = "74372")] #![allow(non_camel_case_types)] #![allow(unused_imports)] @@ -38,6 +37,7 @@ types! { /// type in WebAssembly. Operations on `v128` can only be performed with the /// functions in this module. // N.B., internals here are arbitrary.
+ #[stable(feature = "wasm_simd", since = "1.54.0")] pub struct v128(i32, i32, i32, i32); } @@ -263,6 +263,7 @@ impl Clone for Unaligned { #[cfg_attr(test, assert_instr(v128.load))] #[target_feature(enable = "simd128")] #[doc(alias("v128.load"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub unsafe fn v128_load(m: *const v128) -> v128 { (*(m as *const Unaligned)).0 } @@ -272,6 +273,7 @@ pub unsafe fn v128_load(m: *const v128) -> v128 { #[cfg_attr(test, assert_instr(v128.load8x8_s))] #[target_feature(enable = "simd128")] #[doc(alias("v128.load8x8_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub unsafe fn i16x8_load_extend_i8x8(m: *const i8) -> v128 { let m = *(m as *const Unaligned); simd_cast::<_, simd::i16x8>(m.0).v128() @@ -282,11 +284,13 @@ pub unsafe fn i16x8_load_extend_i8x8(m: *const i8) -> v128 { #[cfg_attr(test, assert_instr(v128.load8x8_u))] #[target_feature(enable = "simd128")] #[doc(alias("v128.load8x8_u"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub unsafe fn i16x8_load_extend_u8x8(m: *const u8) -> v128 { let m = *(m as *const Unaligned); simd_cast::<_, simd::u16x8>(m.0).v128() } +#[stable(feature = "wasm_simd", since = "1.54.0")] pub use i16x8_load_extend_u8x8 as u16x8_load_extend_u8x8; /// Load four 16-bit integers and sign extend each one to a 32-bit lane @@ -294,6 +298,7 @@ pub use i16x8_load_extend_u8x8 as u16x8_load_extend_u8x8; #[cfg_attr(test, assert_instr(v128.load16x4_s))] #[target_feature(enable = "simd128")] #[doc(alias("v128.load16x4_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub unsafe fn i32x4_load_extend_i16x4(m: *const i16) -> v128 { let m = *(m as *const Unaligned); simd_cast::<_, simd::i32x4>(m.0).v128() @@ -304,11 +309,13 @@ pub unsafe fn i32x4_load_extend_i16x4(m: *const i16) -> v128 { #[cfg_attr(test, assert_instr(v128.load16x4_u))] #[target_feature(enable = "simd128")] #[doc(alias("v128.load16x4_u"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub unsafe fn i32x4_load_extend_u16x4(m: *const u16) -> v128 { let m = *(m as *const Unaligned); simd_cast::<_, simd::u32x4>(m.0).v128() } +#[stable(feature = "wasm_simd", since = "1.54.0")] pub use i32x4_load_extend_u16x4 as u32x4_load_extend_u16x4; /// Load two 32-bit integers and sign extend each one to a 64-bit lane @@ -316,6 +323,7 @@ pub use i32x4_load_extend_u16x4 as u32x4_load_extend_u16x4; #[cfg_attr(test, assert_instr(v128.load32x2_s))] #[target_feature(enable = "simd128")] #[doc(alias("v128.load32x2_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub unsafe fn i64x2_load_extend_i32x2(m: *const i32) -> v128 { let m = *(m as *const Unaligned); simd_cast::<_, simd::i64x2>(m.0).v128() @@ -326,11 +334,13 @@ pub unsafe fn i64x2_load_extend_i32x2(m: *const i32) -> v128 { #[cfg_attr(test, assert_instr(v128.load32x2_u))] #[target_feature(enable = "simd128")] #[doc(alias("v128.load32x2_u"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub unsafe fn i64x2_load_extend_u32x2(m: *const u32) -> v128 { let m = *(m as *const Unaligned); simd_cast::<_, simd::u64x2>(m.0).v128() } +#[stable(feature = "wasm_simd", since = "1.54.0")] pub use i64x2_load_extend_u32x2 as u64x2_load_extend_u32x2; /// Load a single element and splat to all lanes of a v128 vector. 
@@ -338,6 +348,7 @@ pub use i64x2_load_extend_u32x2 as u64x2_load_extend_u32x2; #[cfg_attr(test, assert_instr(v128.load8_splat))] #[target_feature(enable = "simd128")] #[doc(alias("v128.load8_splat"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub unsafe fn v128_load8_splat(m: *const u8) -> v128 { simd::u8x16::splat(*m).v128() } @@ -347,6 +358,7 @@ pub unsafe fn v128_load8_splat(m: *const u8) -> v128 { #[cfg_attr(test, assert_instr(v128.load16_splat))] #[target_feature(enable = "simd128")] #[doc(alias("v128.load16_splat"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub unsafe fn v128_load16_splat(m: *const u16) -> v128 { let m = ptr::read_unaligned(m); simd::u16x8::splat(m).v128() @@ -357,6 +369,7 @@ pub unsafe fn v128_load16_splat(m: *const u16) -> v128 { #[cfg_attr(test, assert_instr(v128.load32_splat))] #[target_feature(enable = "simd128")] #[doc(alias("v128.load32_splat"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub unsafe fn v128_load32_splat(m: *const u32) -> v128 { let m = ptr::read_unaligned(m); simd::u32x4::splat(m).v128() @@ -367,6 +380,7 @@ pub unsafe fn v128_load32_splat(m: *const u32) -> v128 { #[cfg_attr(test, assert_instr(v128.load64_splat))] #[target_feature(enable = "simd128")] #[doc(alias("v128.load64_splat"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub unsafe fn v128_load64_splat(m: *const u64) -> v128 { let m = ptr::read_unaligned(m); simd::u64x2::splat(m).v128() @@ -378,6 +392,7 @@ pub unsafe fn v128_load64_splat(m: *const u64) -> v128 { #[cfg_attr(test, assert_instr(v128.load32_zero))] #[target_feature(enable = "simd128")] #[doc(alias("v128.load32_zero"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub unsafe fn v128_load32_zero(m: *const u32) -> v128 { llvm_load32_zero(m).v128() } @@ -388,6 +403,7 @@ pub unsafe fn v128_load32_zero(m: *const u32) -> v128 { #[cfg_attr(test, assert_instr(v128.load64_zero))] #[target_feature(enable = "simd128")] #[doc(alias("v128.load64_zero"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub unsafe fn v128_load64_zero(m: *const u64) -> v128 { llvm_load64_zero(m).v128() } @@ -397,6 +413,7 @@ pub unsafe fn v128_load64_zero(m: *const u64) -> v128 { #[cfg_attr(test, assert_instr(v128.store))] #[target_feature(enable = "simd128")] #[doc(alias("v128.store"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub unsafe fn v128_store(m: *mut v128, a: v128) { *(m as *mut Unaligned) = Unaligned(a); } @@ -406,6 +423,7 @@ pub unsafe fn v128_store(m: *mut v128, a: v128) { #[cfg_attr(test, assert_instr(v128.load8_lane, L = 0))] #[target_feature(enable = "simd128")] #[doc(alias("v128.load8_lane"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub unsafe fn v128_load8_lane<const L: usize>(v: v128, m: *const u8) -> v128 { static_assert!(L: usize where L < 16); llvm_load8_lane(m, v.as_u8x16(), L).v128() } @@ -416,6 +434,7 @@ pub unsafe fn v128_load8_lane<const L: usize>(v: v128, m: *const u8) -> v128 { #[cfg_attr(test, assert_instr(v128.load16_lane, L = 0))] #[target_feature(enable = "simd128")] #[doc(alias("v128.load16_lane"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub unsafe fn v128_load16_lane<const L: usize>(v: v128, m: *const u16) -> v128 { static_assert!(L: usize where L < 8); llvm_load16_lane(m, v.as_u16x8(), L).v128() } @@ -426,6 +445,7 @@ pub unsafe fn v128_load16_lane<const L: usize>(v: v128, m: *const u16) -> v128 { #[cfg_attr(test, assert_instr(v128.load32_lane, L = 0))] #[target_feature(enable = "simd128")] #[doc(alias("v128.load32_lane"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub unsafe fn
v128_load32_lane<const L: usize>(v: v128, m: *const u32) -> v128 { static_assert!(L: usize where L < 4); llvm_load32_lane(m, v.as_u32x4(), L).v128() } @@ -436,6 +456,7 @@ pub unsafe fn v128_load32_lane<const L: usize>(v: v128, m: *const u32) -> v128 { #[cfg_attr(test, assert_instr(v128.load64_lane, L = 0))] #[target_feature(enable = "simd128")] #[doc(alias("v128.load64_lane"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub unsafe fn v128_load64_lane<const L: usize>(v: v128, m: *const u64) -> v128 { static_assert!(L: usize where L < 2); llvm_load64_lane(m, v.as_u64x2(), L).v128() } @@ -446,6 +467,7 @@ pub unsafe fn v128_load64_lane<const L: usize>(v: v128, m: *const u64) -> v128 { #[cfg_attr(test, assert_instr(v128.store8_lane, L = 0))] #[target_feature(enable = "simd128")] #[doc(alias("v128.store8_lane"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub unsafe fn v128_store8_lane<const L: usize>(v: v128, m: *mut u8) { static_assert!(L: usize where L < 16); llvm_store8_lane(m, v.as_u8x16(), L); } @@ -456,6 +478,7 @@ pub unsafe fn v128_store8_lane<const L: usize>(v: v128, m: *mut u8) { #[cfg_attr(test, assert_instr(v128.store16_lane, L = 0))] #[target_feature(enable = "simd128")] #[doc(alias("v128.store16_lane"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub unsafe fn v128_store16_lane<const L: usize>(v: v128, m: *mut u16) { static_assert!(L: usize where L < 8); llvm_store16_lane(m, v.as_u16x8(), L) } @@ -466,6 +489,7 @@ pub unsafe fn v128_store16_lane<const L: usize>(v: v128, m: *mut u16) { #[cfg_attr(test, assert_instr(v128.store32_lane, L = 0))] #[target_feature(enable = "simd128")] #[doc(alias("v128.store32_lane"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub unsafe fn v128_store32_lane<const L: usize>(v: v128, m: *mut u32) { static_assert!(L: usize where L < 4); llvm_store32_lane(m, v.as_u32x4(), L) } @@ -476,6 +500,7 @@ pub unsafe fn v128_store32_lane<const L: usize>(v: v128, m: *mut u32) { #[cfg_attr(test, assert_instr(v128.store64_lane, L = 0))] #[target_feature(enable = "simd128")] #[doc(alias("v128.store64_lane"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub unsafe fn v128_store64_lane<const L: usize>(v: v128, m: *mut u64) { static_assert!(L: usize where L < 2); llvm_store64_lane(m, v.as_u64x2(), L) } @@ -510,6 +535,8 @@ pub unsafe fn v128_store64_lane<const L: usize>(v: v128, m: *mut u64) { ) )] #[doc(alias("v128.const"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] +#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")] pub const fn i8x16( a0: i8, a1: i8, a2: i8, a3: i8, a4: i8, a5: i8, a6: i8, a7: i8, a8: i8, a9: i8, a10: i8, a11: i8, a12: i8, a13: i8, a14: i8, a15: i8, ) -> v128 { - simd::i8x16( - a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, + v128( + (a0 as u8 as i32) + | ((a1 as u8 as i32) << 8) + | ((a2 as u8 as i32) << 16) + | ((a3 as u8 as i32) << 24), + (a4 as u8 as i32) + | ((a5 as u8 as i32) << 8) + | ((a6 as u8 as i32) << 16) + | ((a7 as u8 as i32) << 24), + (a8 as u8 as i32) + | ((a9 as u8 as i32) << 8) + | ((a10 as u8 as i32) << 16) + | ((a11 as u8 as i32) << 24), + (a12 as u8 as i32) + | ((a13 as u8 as i32) << 8) + | ((a14 as u8 as i32) << 16) + | ((a15 as u8 as i32) << 24), ) - .v128() } /// Materializes a SIMD value from the provided operands.
@@ -541,6 +582,8 @@ pub const fn i8x16( #[inline] #[target_feature(enable = "simd128")] #[doc(alias("v128.const"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] +#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")] pub const fn u8x16( a0: u8, a1: u8, @@ -586,8 +629,15 @@ pub const fn u8x16( ) )] #[doc(alias("v128.const"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] +#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")] pub const fn i16x8(a0: i16, a1: i16, a2: i16, a3: i16, a4: i16, a5: i16, a6: i16, a7: i16) -> v128 { - simd::i16x8(a0, a1, a2, a3, a4, a5, a6, a7).v128() + v128( + (a0 as u16 as i32) | ((a1 as i32) << 16), + (a2 as u16 as i32) | ((a3 as i32) << 16), + (a4 as u16 as i32) | ((a5 as i32) << 16), + (a6 as u16 as i32) | ((a7 as i32) << 16), + ) } /// Materializes a SIMD value from the provided operands. @@ -597,6 +647,8 @@ pub const fn i16x8(a0: i16, a1: i16, a2: i16, a3: i16, a4: i16, a5: i16, a6: i16 #[inline] #[target_feature(enable = "simd128")] #[doc(alias("v128.const"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] +#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")] pub const fn u16x8(a0: u16, a1: u16, a2: u16, a3: u16, a4: u16, a5: u16, a6: u16, a7: u16) -> v128 { i16x8( a0 as i16, a1 as i16, a2 as i16, a3 as i16, a4 as i16, a5 as i16, a6 as i16, a7 as i16, @@ -611,8 +663,10 @@ pub const fn u16x8(a0: u16, a1: u16, a2: u16, a3: u16, a4: u16, a5: u16, a6: u16 #[target_feature(enable = "simd128")] #[cfg_attr(test, assert_instr(v128.const, a0 = 0, a1 = 1, a2 = 2, a3 = 3))] #[doc(alias("v128.const"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] +#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")] pub const fn i32x4(a0: i32, a1: i32, a2: i32, a3: i32) -> v128 { - simd::i32x4(a0, a1, a2, a3).v128() + v128(a0, a1, a2, a3) } /// Materializes a SIMD value from the provided operands. @@ -622,6 +676,8 @@ pub const fn i32x4(a0: i32, a1: i32, a2: i32, a3: i32) -> v128 { #[inline] #[target_feature(enable = "simd128")] #[doc(alias("v128.const"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] +#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")] pub const fn u32x4(a0: u32, a1: u32, a2: u32, a3: u32) -> v128 { i32x4(a0 as i32, a1 as i32, a2 as i32, a3 as i32) } @@ -632,10 +688,12 @@ pub const fn u32x4(a0: u32, a1: u32, a2: u32, a3: u32) -> v128 { /// be lowered to a sequence of instructions to materialize the vector value. #[inline] #[target_feature(enable = "simd128")] -#[cfg_attr(test, assert_instr(v128.const, a0 = 0, a1 = 1))] +#[cfg_attr(test, assert_instr(v128.const, a0 = 1, a1 = 2))] #[doc(alias("v128.const"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] +#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")] pub const fn i64x2(a0: i64, a1: i64) -> v128 { - simd::i64x2(a0, a1).v128() + v128(a0 as i32, (a0 >> 32) as i32, a1 as i32, (a1 >> 32) as i32) } /// Materializes a SIMD value from the provided operands. 
@@ -645,6 +703,8 @@ pub const fn i64x2(a0: i64, a1: i64) -> v128 { #[inline] #[target_feature(enable = "simd128")] #[doc(alias("v128.const"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] +#[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")] pub const fn u64x2(a0: u64, a1: u64) -> v128 { i64x2(a0 as i64, a1 as i64) } @@ -657,6 +717,8 @@ pub const fn u64x2(a0: u64, a1: u64) -> v128 { #[target_feature(enable = "simd128")] #[cfg_attr(test, assert_instr(v128.const, a0 = 0.0, a1 = 1.0, a2 = 2.0, a3 = 3.0))] #[doc(alias("v128.const"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] +#[rustc_const_unstable(feature = "wasm_simd_const", issue = "72447")] pub const fn f32x4(a0: f32, a1: f32, a2: f32, a3: f32) -> v128 { simd::f32x4(a0, a1, a2, a3).v128() } @@ -669,6 +731,8 @@ pub const fn f32x4(a0: f32, a1: f32, a2: f32, a3: f32) -> v128 { #[target_feature(enable = "simd128")] #[cfg_attr(test, assert_instr(v128.const, a0 = 0.0, a1 = 1.0))] #[doc(alias("v128.const"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] +#[rustc_const_unstable(feature = "wasm_simd_const", issue = "72447")] pub const fn f64x2(a0: f64, a1: f64) -> v128 { simd::f64x2(a0, a1).v128() } @@ -711,6 +775,7 @@ pub const fn f64x2(a0: f64, a1: f64) -> v128 { )] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.shuffle"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_shuffle< const I0: usize, const I1: usize, @@ -779,6 +844,7 @@ pub fn i8x16_shuffle< shuf.v128() } +#[stable(feature = "wasm_simd", since = "1.54.0")] pub use i8x16_shuffle as u8x16_shuffle; /// Same as [`i8x16_shuffle`], except operates as if the inputs were eight @@ -804,6 +870,7 @@ pub use i8x16_shuffle as u8x16_shuffle; )] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.shuffle"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i16x8_shuffle< const I0: usize, const I1: usize, @@ -846,6 +913,7 @@ pub fn i16x8_shuffle< shuf.v128() } +#[stable(feature = "wasm_simd", since = "1.54.0")] pub use i16x8_shuffle as u16x8_shuffle; /// Same as [`i8x16_shuffle`], except operates as if the inputs were four @@ -859,6 +927,7 @@ pub use i16x8_shuffle as u16x8_shuffle; #[cfg_attr(test, assert_instr(i8x16.shuffle, I0 = 0, I1 = 2, I2 = 4, I3 = 6))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.shuffle"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i32x4_shuffle<const I0: usize, const I1: usize, const I2: usize, const I3: usize>( a: v128, b: v128, @@ -877,6 +946,7 @@ pub fn i32x4_shuffle<const I0: usize, const I1: usize, const I2: usize, const I3: usize>(a: v128, b: v128) -> v128 { static_assert!(I0: usize where I0 < 4); static_assert!(I1: usize where I1 < 4); @@ -903,6 +974,7 @@ pub fn i64x2_shuffle<const I0: usize, const I1: usize>(a: v128, b: v128) -> v128 shuf.v128() } +#[stable(feature = "wasm_simd", since = "1.54.0")] pub use i64x2_shuffle as u64x2_shuffle; /// Extracts a lane from a 128-bit vector interpreted as 16 packed i8 numbers.
@@ -913,6 +985,7 @@ pub use i64x2_shuffle as u64x2_shuffle; #[cfg_attr(test, assert_instr(i8x16.extract_lane_s, N = 3))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.extract_lane_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_extract_lane<const N: usize>(a: v128) -> i8 { static_assert!(N: usize where N < 16); unsafe { simd_extract(a.as_i8x16(), N as u32) } @@ -926,6 +999,7 @@ pub fn i8x16_extract_lane<const N: usize>(a: v128) -> i8 { #[cfg_attr(test, assert_instr(i8x16.extract_lane_u, N = 3))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.extract_lane_u"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u8x16_extract_lane<const N: usize>(a: v128) -> u8 { static_assert!(N: usize where N < 16); unsafe { simd_extract(a.as_u8x16(), N as u32) } @@ -939,6 +1013,7 @@ pub fn u8x16_extract_lane<const N: usize>(a: v128) -> u8 { #[cfg_attr(test, assert_instr(i8x16.replace_lane, N = 2))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.replace_lane"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_replace_lane<const N: usize>(a: v128, val: i8) -> v128 { static_assert!(N: usize where N < 16); unsafe { simd_insert(a.as_i8x16(), N as u32, val).v128() } @@ -952,6 +1027,7 @@ pub fn i8x16_replace_lane<const N: usize>(a: v128, val: i8) -> v128 { #[cfg_attr(test, assert_instr(i8x16.replace_lane, N = 2))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.replace_lane"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u8x16_replace_lane<const N: usize>(a: v128, val: u8) -> v128 { static_assert!(N: usize where N < 16); unsafe { simd_insert(a.as_u8x16(), N as u32, val).v128() } @@ -965,6 +1041,7 @@ pub fn u8x16_replace_lane<const N: usize>(a: v128, val: u8) -> v128 { #[cfg_attr(test, assert_instr(i16x8.extract_lane_s, N = 2))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.extract_lane_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i16x8_extract_lane<const N: usize>(a: v128) -> i16 { static_assert!(N: usize where N < 8); unsafe { simd_extract(a.as_i16x8(), N as u32) } @@ -978,6 +1055,7 @@ pub fn i16x8_extract_lane<const N: usize>(a: v128) -> i16 { #[cfg_attr(test, assert_instr(i16x8.extract_lane_u, N = 2))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.extract_lane_u"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u16x8_extract_lane<const N: usize>(a: v128) -> u16 { static_assert!(N: usize where N < 8); unsafe { simd_extract(a.as_u16x8(), N as u32) } @@ -991,6 +1069,7 @@ pub fn u16x8_extract_lane<const N: usize>(a: v128) -> u16 { #[cfg_attr(test, assert_instr(i16x8.replace_lane, N = 2))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.replace_lane"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i16x8_replace_lane<const N: usize>(a: v128, val: i16) -> v128 { static_assert!(N: usize where N < 8); unsafe { simd_insert(a.as_i16x8(), N as u32, val).v128() } @@ -1004,6 +1083,7 @@ pub fn i16x8_replace_lane<const N: usize>(a: v128, val: i16) -> v128 { #[cfg_attr(test, assert_instr(i16x8.replace_lane, N = 2))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.replace_lane"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u16x8_replace_lane<const N: usize>(a: v128, val: u16) -> v128 { static_assert!(N: usize where N < 8); unsafe { simd_insert(a.as_u16x8(), N as u32, val).v128() } @@ -1017,6 +1097,7 @@ pub fn u16x8_replace_lane<const N: usize>(a: v128, val: u16) -> v128 { #[cfg_attr(test, assert_instr(i32x4.extract_lane, N = 2))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.extract_lane"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i32x4_extract_lane<const N: usize>(a: v128) -> i32 { static_assert!(N: usize where N < 4); unsafe { simd_extract(a.as_i32x4(), N as
u32) } @@ -1029,6 +1110,7 @@ pub fn i32x4_extract_lane<const N: usize>(a: v128) -> i32 { #[inline] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.extract_lane"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u32x4_extract_lane<const N: usize>(a: v128) -> u32 { i32x4_extract_lane::<N>(a) as u32 } @@ -1041,6 +1123,7 @@ pub fn u32x4_extract_lane<const N: usize>(a: v128) -> u32 { #[cfg_attr(test, assert_instr(i32x4.replace_lane, N = 2))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.replace_lane"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i32x4_replace_lane<const N: usize>(a: v128, val: i32) -> v128 { static_assert!(N: usize where N < 4); unsafe { simd_insert(a.as_i32x4(), N as u32, val).v128() } @@ -1053,6 +1136,7 @@ pub fn i32x4_replace_lane<const N: usize>(a: v128, val: i32) -> v128 { #[inline] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.replace_lane"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u32x4_replace_lane<const N: usize>(a: v128, val: u32) -> v128 { i32x4_replace_lane::<N>(a, val as i32) } @@ -1065,6 +1149,7 @@ pub fn u32x4_replace_lane<const N: usize>(a: v128, val: u32) -> v128 { #[cfg_attr(test, assert_instr(i64x2.extract_lane, N = 1))] #[target_feature(enable = "simd128")] #[doc(alias("i64x2.extract_lane"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i64x2_extract_lane<const N: usize>(a: v128) -> i64 { static_assert!(N: usize where N < 2); unsafe { simd_extract(a.as_i64x2(), N as u32) } @@ -1077,6 +1162,7 @@ pub fn i64x2_extract_lane<const N: usize>(a: v128) -> i64 { #[inline] #[target_feature(enable = "simd128")] #[doc(alias("i64x2.extract_lane"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u64x2_extract_lane<const N: usize>(a: v128) -> u64 { i64x2_extract_lane::<N>(a) as u64 } @@ -1089,6 +1175,7 @@ pub fn u64x2_extract_lane<const N: usize>(a: v128) -> u64 { #[cfg_attr(test, assert_instr(i64x2.replace_lane, N = 0))] #[target_feature(enable = "simd128")] #[doc(alias("i64x2.replace_lane"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i64x2_replace_lane<const N: usize>(a: v128, val: i64) -> v128 { static_assert!(N: usize where N < 2); unsafe { simd_insert(a.as_i64x2(), N as u32, val).v128() } @@ -1101,6 +1188,7 @@ pub fn i64x2_replace_lane<const N: usize>(a: v128, val: i64) -> v128 { #[inline] #[target_feature(enable = "simd128")] #[doc(alias("i64x2.replace_lane"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u64x2_replace_lane<const N: usize>(a: v128, val: u64) -> v128 { i64x2_replace_lane::<N>(a, val as i64) } @@ -1113,6 +1201,7 @@ pub fn u64x2_replace_lane<const N: usize>(a: v128, val: u64) -> v128 { #[cfg_attr(test, assert_instr(f32x4.extract_lane, N = 1))] #[target_feature(enable = "simd128")] #[doc(alias("f32x4.extract_lane"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn f32x4_extract_lane<const N: usize>(a: v128) -> f32 { static_assert!(N: usize where N < 4); unsafe { simd_extract(a.as_f32x4(), N as u32) } @@ -1126,6 +1215,7 @@ pub fn f32x4_extract_lane<const N: usize>(a: v128) -> f32 { #[cfg_attr(test, assert_instr(f32x4.replace_lane, N = 1))] #[target_feature(enable = "simd128")] #[doc(alias("f32x4.replace_lane"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn f32x4_replace_lane<const N: usize>(a: v128, val: f32) -> v128 { static_assert!(N: usize where N < 4); unsafe { simd_insert(a.as_f32x4(), N as u32, val).v128() } @@ -1139,6 +1229,7 @@ pub fn f32x4_replace_lane<const N: usize>(a: v128, val: f32) -> v128 { #[cfg_attr(test, assert_instr(f64x2.extract_lane, N = 1))] #[target_feature(enable = "simd128")] #[doc(alias("f64x2.extract_lane"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn f64x2_extract_lane<const N: usize>(a: v128) -> f64 { static_assert!(N: usize where N < 2); unsafe { simd_extract(a.as_f64x2(), N as
u32) } @@ -1152,6 +1243,7 @@ pub fn f64x2_extract_lane<const N: usize>(a: v128) -> f64 { #[cfg_attr(test, assert_instr(f64x2.replace_lane, N = 1))] #[target_feature(enable = "simd128")] #[doc(alias("f64x2.replace_lane"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn f64x2_replace_lane<const N: usize>(a: v128, val: f64) -> v128 { static_assert!(N: usize where N < 2); unsafe { simd_insert(a.as_f64x2(), N as u32, val).v128() } @@ -1166,10 +1258,12 @@ pub fn f64x2_replace_lane<const N: usize>(a: v128, val: f64) -> v128 { #[cfg_attr(test, assert_instr(i8x16.swizzle))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.swizzle"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_swizzle(a: v128, s: v128) -> v128 { unsafe { llvm_swizzle(a.as_i8x16(), s.as_i8x16()).v128() } } +#[stable(feature = "wasm_simd", since = "1.54.0")] pub use i8x16_swizzle as u8x16_swizzle; /// Creates a vector with identical lanes. @@ -1179,6 +1273,7 @@ pub use i8x16_swizzle as u8x16_swizzle; #[cfg_attr(test, assert_instr(i8x16.splat))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.splat"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_splat(a: i8) -> v128 { simd::i8x16::splat(a).v128() } @@ -1190,6 +1285,7 @@ pub fn i8x16_splat(a: i8) -> v128 { #[cfg_attr(test, assert_instr(i8x16.splat))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.splat"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u8x16_splat(a: u8) -> v128 { simd::u8x16::splat(a).v128() } @@ -1201,6 +1297,7 @@ pub fn u8x16_splat(a: u8) -> v128 { #[cfg_attr(test, assert_instr(i16x8.splat))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.splat"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i16x8_splat(a: i16) -> v128 { simd::i16x8::splat(a).v128() } @@ -1212,6 +1309,7 @@ pub fn i16x8_splat(a: i16) -> v128 { #[cfg_attr(test, assert_instr(i16x8.splat))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.splat"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u16x8_splat(a: u16) -> v128 { simd::u16x8::splat(a).v128() } @@ -1223,6 +1321,7 @@ pub fn u16x8_splat(a: u16) -> v128 { #[cfg_attr(test, assert_instr(i32x4.splat))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.splat"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i32x4_splat(a: i32) -> v128 { simd::i32x4::splat(a).v128() } @@ -1233,6 +1332,7 @@ pub fn i32x4_splat(a: i32) -> v128 { #[inline] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.splat"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u32x4_splat(a: u32) -> v128 { i32x4_splat(a as i32) } @@ -1244,6 +1344,7 @@ pub fn u32x4_splat(a: u32) -> v128 { #[cfg_attr(test, assert_instr(i64x2.splat))] #[target_feature(enable = "simd128")] #[doc(alias("i64x2.splat"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i64x2_splat(a: i64) -> v128 { simd::i64x2::splat(a).v128() } @@ -1254,6 +1355,7 @@ pub fn i64x2_splat(a: i64) -> v128 { #[inline] #[target_feature(enable = "simd128")] #[doc(alias("i64x2.splat"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u64x2_splat(a: u64) -> v128 { i64x2_splat(a as i64) } @@ -1265,6 +1367,7 @@ pub fn u64x2_splat(a: u64) -> v128 { #[cfg_attr(test, assert_instr(f32x4.splat))] #[target_feature(enable = "simd128")] #[doc(alias("f32x4.splat"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn f32x4_splat(a: f32) -> v128 { simd::f32x4::splat(a).v128() } @@ -1276,6 +1379,7 @@ pub fn f32x4_splat(a: f32) -> v128 { #[cfg_attr(test, assert_instr(f64x2.splat))]
#[target_feature(enable = "simd128")] #[doc(alias("f64x2.splat"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn f64x2_splat(a: f64) -> v128 { simd::f64x2::splat(a).v128() } @@ -1289,6 +1393,7 @@ pub fn f64x2_splat(a: f64) -> v128 { #[cfg_attr(test, assert_instr(i8x16.eq))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.eq"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_eq(a: v128, b: v128) -> v128 { unsafe { simd_eq::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() } } @@ -1302,11 +1407,14 @@ pub fn i8x16_eq(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.ne))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.ne"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_ne(a: v128, b: v128) -> v128 { unsafe { simd_ne::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() } } +#[stable(feature = "wasm_simd", since = "1.54.0")] pub use i8x16_eq as u8x16_eq; +#[stable(feature = "wasm_simd", since = "1.54.0")] pub use i8x16_ne as u8x16_ne; /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit @@ -1318,6 +1426,7 @@ pub use i8x16_ne as u8x16_ne; #[cfg_attr(test, assert_instr(i8x16.lt_s))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.lt_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_lt(a: v128, b: v128) -> v128 { unsafe { simd_lt::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() } } @@ -1331,6 +1440,7 @@ pub fn i8x16_lt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.lt_u))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.lt_u"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u8x16_lt(a: v128, b: v128) -> v128 { unsafe { simd_lt::<_, simd::i8x16>(a.as_u8x16(), b.as_u8x16()).v128() } } @@ -1344,6 +1454,7 @@ pub fn u8x16_lt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.gt_s))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.gt_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_gt(a: v128, b: v128) -> v128 { unsafe { simd_gt::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() } } @@ -1357,6 +1468,7 @@ pub fn i8x16_gt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.gt_u))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.gt_u"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u8x16_gt(a: v128, b: v128) -> v128 { unsafe { simd_gt::<_, simd::i8x16>(a.as_u8x16(), b.as_u8x16()).v128() } } @@ -1370,6 +1482,7 @@ pub fn u8x16_gt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.le_s))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.le_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_le(a: v128, b: v128) -> v128 { unsafe { simd_le::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() } } @@ -1383,6 +1496,7 @@ pub fn i8x16_le(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.le_u))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.le_u"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u8x16_le(a: v128, b: v128) -> v128 { unsafe { simd_le::<_, simd::i8x16>(a.as_u8x16(), b.as_u8x16()).v128() } } @@ -1396,6 +1510,7 @@ pub fn u8x16_le(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.ge_s))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.ge_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_ge(a: v128, b: v128) -> v128 { unsafe { simd_ge::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() } } @@ -1409,6 +1524,7 @@ pub fn 
i8x16_ge(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.ge_u))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.ge_u"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u8x16_ge(a: v128, b: v128) -> v128 { unsafe { simd_ge::<_, simd::i8x16>(a.as_u8x16(), b.as_u8x16()).v128() } } @@ -1422,6 +1538,7 @@ pub fn u8x16_ge(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i16x8.eq))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.eq"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i16x8_eq(a: v128, b: v128) -> v128 { unsafe { simd_eq::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() } } @@ -1435,11 +1552,14 @@ pub fn i16x8_eq(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i16x8.ne))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.ne"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i16x8_ne(a: v128, b: v128) -> v128 { unsafe { simd_ne::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() } } +#[stable(feature = "wasm_simd", since = "1.54.0")] pub use i16x8_eq as u16x8_eq; +#[stable(feature = "wasm_simd", since = "1.54.0")] pub use i16x8_ne as u16x8_ne; /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit @@ -1451,6 +1571,7 @@ pub use i16x8_ne as u16x8_ne; #[cfg_attr(test, assert_instr(i16x8.lt_s))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.lt_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i16x8_lt(a: v128, b: v128) -> v128 { unsafe { simd_lt::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() } } @@ -1464,6 +1585,7 @@ pub fn i16x8_lt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i16x8.lt_u))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.lt_u"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u16x8_lt(a: v128, b: v128) -> v128 { unsafe { simd_lt::<_, simd::i16x8>(a.as_u16x8(), b.as_u16x8()).v128() } } @@ -1477,6 +1599,7 @@ pub fn u16x8_lt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i16x8.gt_s))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.gt_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i16x8_gt(a: v128, b: v128) -> v128 { unsafe { simd_gt::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() } } @@ -1490,6 +1613,7 @@ pub fn i16x8_gt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i16x8.gt_u))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.gt_u"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u16x8_gt(a: v128, b: v128) -> v128 { unsafe { simd_gt::<_, simd::i16x8>(a.as_u16x8(), b.as_u16x8()).v128() } } @@ -1503,6 +1627,7 @@ pub fn u16x8_gt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i16x8.le_s))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.le_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i16x8_le(a: v128, b: v128) -> v128 { unsafe { simd_le::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() } } @@ -1516,6 +1641,7 @@ pub fn i16x8_le(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i16x8.le_u))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.le_u"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u16x8_le(a: v128, b: v128) -> v128 { unsafe { simd_le::<_, simd::i16x8>(a.as_u16x8(), b.as_u16x8()).v128() } } @@ -1529,6 +1655,7 @@ pub fn u16x8_le(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i16x8.ge_s))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.ge_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i16x8_ge(a: 
v128, b: v128) -> v128 { unsafe { simd_ge::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() } } @@ -1542,6 +1669,7 @@ pub fn i16x8_ge(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i16x8.ge_u))] #[target_feature(enable = "simd128")] #[doc(alias("i16x8.ge_u"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u16x8_ge(a: v128, b: v128) -> v128 { unsafe { simd_ge::<_, simd::i16x8>(a.as_u16x8(), b.as_u16x8()).v128() } } @@ -1555,6 +1683,7 @@ pub fn u16x8_ge(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i32x4.eq))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.eq"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i32x4_eq(a: v128, b: v128) -> v128 { unsafe { simd_eq::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() } } @@ -1568,11 +1697,14 @@ pub fn i32x4_eq(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i32x4.ne))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.ne"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i32x4_ne(a: v128, b: v128) -> v128 { unsafe { simd_ne::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() } } +#[stable(feature = "wasm_simd", since = "1.54.0")] pub use i32x4_eq as u32x4_eq; +#[stable(feature = "wasm_simd", since = "1.54.0")] pub use i32x4_ne as u32x4_ne; /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit @@ -1584,6 +1716,7 @@ pub use i32x4_ne as u32x4_ne; #[cfg_attr(test, assert_instr(i32x4.lt_s))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.lt_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i32x4_lt(a: v128, b: v128) -> v128 { unsafe { simd_lt::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() } } @@ -1597,6 +1730,7 @@ pub fn i32x4_lt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i32x4.lt_u))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.lt_u"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u32x4_lt(a: v128, b: v128) -> v128 { unsafe { simd_lt::<_, simd::i32x4>(a.as_u32x4(), b.as_u32x4()).v128() } } @@ -1610,6 +1744,7 @@ pub fn u32x4_lt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i32x4.gt_s))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.gt_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i32x4_gt(a: v128, b: v128) -> v128 { unsafe { simd_gt::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() } } @@ -1623,6 +1758,7 @@ pub fn i32x4_gt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i32x4.gt_u))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.gt_u"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u32x4_gt(a: v128, b: v128) -> v128 { unsafe { simd_gt::<_, simd::i32x4>(a.as_u32x4(), b.as_u32x4()).v128() } } @@ -1636,6 +1772,7 @@ pub fn u32x4_gt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i32x4.le_s))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.le_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i32x4_le(a: v128, b: v128) -> v128 { unsafe { simd_le::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() } } @@ -1649,6 +1786,7 @@ pub fn i32x4_le(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i32x4.le_u))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.le_u"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u32x4_le(a: v128, b: v128) -> v128 { unsafe { simd_le::<_, simd::i32x4>(a.as_u32x4(), b.as_u32x4()).v128() } } @@ -1662,6 +1800,7 @@ pub fn u32x4_le(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i32x4.ge_s))] 
#[target_feature(enable = "simd128")] #[doc(alias("i32x4.ge_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i32x4_ge(a: v128, b: v128) -> v128 { unsafe { simd_ge::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() } } @@ -1675,6 +1814,7 @@ pub fn i32x4_ge(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i32x4.ge_u))] #[target_feature(enable = "simd128")] #[doc(alias("i32x4.ge_u"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u32x4_ge(a: v128, b: v128) -> v128 { unsafe { simd_ge::<_, simd::i32x4>(a.as_u32x4(), b.as_u32x4()).v128() } } @@ -1688,6 +1828,7 @@ pub fn u32x4_ge(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i64x2.eq))] #[target_feature(enable = "simd128")] #[doc(alias("i64x2.eq"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i64x2_eq(a: v128, b: v128) -> v128 { unsafe { simd_eq::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() } } @@ -1701,11 +1842,14 @@ pub fn i64x2_eq(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i64x2.ne))] #[target_feature(enable = "simd128")] #[doc(alias("i64x2.ne"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i64x2_ne(a: v128, b: v128) -> v128 { unsafe { simd_ne::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() } } +#[stable(feature = "wasm_simd", since = "1.54.0")] pub use i64x2_eq as u64x2_eq; +#[stable(feature = "wasm_simd", since = "1.54.0")] pub use i64x2_ne as u64x2_ne; /// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit @@ -1717,6 +1861,7 @@ pub use i64x2_ne as u64x2_ne; #[cfg_attr(test, assert_instr(i64x2.lt_s))] #[target_feature(enable = "simd128")] #[doc(alias("i64x2.lt_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i64x2_lt(a: v128, b: v128) -> v128 { unsafe { simd_lt::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() } } @@ -1730,6 +1875,7 @@ pub fn i64x2_lt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i64x2.gt_s))] #[target_feature(enable = "simd128")] #[doc(alias("i64x2.gt_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i64x2_gt(a: v128, b: v128) -> v128 { unsafe { simd_gt::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() } } @@ -1743,6 +1889,7 @@ pub fn i64x2_gt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i64x2.le_s))] #[target_feature(enable = "simd128")] #[doc(alias("i64x2.le_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i64x2_le(a: v128, b: v128) -> v128 { unsafe { simd_le::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() } } @@ -1756,6 +1903,7 @@ pub fn i64x2_le(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i64x2.ge_s))] #[target_feature(enable = "simd128")] #[doc(alias("i64x2.ge_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i64x2_ge(a: v128, b: v128) -> v128 { unsafe { simd_ge::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() } } @@ -1769,6 +1917,7 @@ pub fn i64x2_ge(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f32x4.eq))] #[target_feature(enable = "simd128")] #[doc(alias("f32x4.eq"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn f32x4_eq(a: v128, b: v128) -> v128 { unsafe { simd_eq::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() } } @@ -1782,6 +1931,7 @@ pub fn f32x4_eq(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f32x4.ne))] #[target_feature(enable = "simd128")] #[doc(alias("f32x4.ne"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn f32x4_ne(a: v128, b: v128) -> v128 { unsafe { simd_ne::<_, simd::i32x4>(a.as_f32x4(), 
b.as_f32x4()).v128() } } @@ -1795,6 +1945,7 @@ pub fn f32x4_ne(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f32x4.lt))] #[target_feature(enable = "simd128")] #[doc(alias("f32x4.lt"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn f32x4_lt(a: v128, b: v128) -> v128 { unsafe { simd_lt::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() } } @@ -1808,6 +1959,7 @@ pub fn f32x4_lt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f32x4.gt))] #[target_feature(enable = "simd128")] #[doc(alias("f32x4.gt"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn f32x4_gt(a: v128, b: v128) -> v128 { unsafe { simd_gt::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() } } @@ -1821,6 +1973,7 @@ pub fn f32x4_gt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f32x4.le))] #[target_feature(enable = "simd128")] #[doc(alias("f32x4.le"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn f32x4_le(a: v128, b: v128) -> v128 { unsafe { simd_le::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() } } @@ -1834,6 +1987,7 @@ pub fn f32x4_le(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f32x4.ge))] #[target_feature(enable = "simd128")] #[doc(alias("f32x4.ge"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn f32x4_ge(a: v128, b: v128) -> v128 { unsafe { simd_ge::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() } } @@ -1847,6 +2001,7 @@ pub fn f32x4_ge(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f64x2.eq))] #[target_feature(enable = "simd128")] #[doc(alias("f64x2.eq"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn f64x2_eq(a: v128, b: v128) -> v128 { unsafe { simd_eq::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() } } @@ -1860,6 +2015,7 @@ pub fn f64x2_eq(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f64x2.ne))] #[target_feature(enable = "simd128")] #[doc(alias("f64x2.ne"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn f64x2_ne(a: v128, b: v128) -> v128 { unsafe { simd_ne::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() } } @@ -1873,6 +2029,7 @@ pub fn f64x2_ne(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f64x2.lt))] #[target_feature(enable = "simd128")] #[doc(alias("f64x2.lt"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn f64x2_lt(a: v128, b: v128) -> v128 { unsafe { simd_lt::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() } } @@ -1886,6 +2043,7 @@ pub fn f64x2_lt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f64x2.gt))] #[target_feature(enable = "simd128")] #[doc(alias("f64x2.gt"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn f64x2_gt(a: v128, b: v128) -> v128 { unsafe { simd_gt::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() } } @@ -1899,6 +2057,7 @@ pub fn f64x2_gt(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f64x2.le))] #[target_feature(enable = "simd128")] #[doc(alias("f64x2.le"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn f64x2_le(a: v128, b: v128) -> v128 { unsafe { simd_le::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() } } @@ -1912,6 +2071,7 @@ pub fn f64x2_le(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(f64x2.ge))] #[target_feature(enable = "simd128")] #[doc(alias("f64x2.ge"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn f64x2_ge(a: v128, b: v128) -> v128 { unsafe { simd_ge::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() } } @@ -1921,6 +2081,7 @@ pub fn f64x2_ge(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(v128.not))] 
#[target_feature(enable = "simd128")] #[doc(alias("v128.not"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn v128_not(a: v128) -> v128 { unsafe { simd_xor(a.as_i64x2(), simd::i64x2(!0, !0)).v128() } } @@ -1931,6 +2092,7 @@ pub fn v128_not(a: v128) -> v128 { #[cfg_attr(test, assert_instr(v128.and))] #[target_feature(enable = "simd128")] #[doc(alias("v128.and"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn v128_and(a: v128, b: v128) -> v128 { unsafe { simd_and(a.as_i64x2(), b.as_i64x2()).v128() } } @@ -1942,6 +2104,7 @@ pub fn v128_and(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(v128.andnot))] #[target_feature(enable = "simd128")] #[doc(alias("v128.andnot"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn v128_andnot(a: v128, b: v128) -> v128 { unsafe { simd_and(a.as_i64x2(), simd_xor(b.as_i64x2(), simd::i64x2(-1, -1))).v128() } } @@ -1952,6 +2115,7 @@ pub fn v128_andnot(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(v128.or))] #[target_feature(enable = "simd128")] #[doc(alias("v128.or"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn v128_or(a: v128, b: v128) -> v128 { unsafe { simd_or(a.as_i64x2(), b.as_i64x2()).v128() } } @@ -1962,6 +2126,7 @@ pub fn v128_or(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(v128.xor))] #[target_feature(enable = "simd128")] #[doc(alias("v128.xor"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn v128_xor(a: v128, b: v128) -> v128 { unsafe { simd_xor(a.as_i64x2(), b.as_i64x2()).v128() } } @@ -1971,6 +2136,7 @@ pub fn v128_xor(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(v128.bitselect))] #[target_feature(enable = "simd128")] #[doc(alias("v128.bitselect"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn v128_bitselect(v1: v128, v2: v128, c: v128) -> v128 { unsafe { llvm_bitselect(v1.as_i8x16(), v2.as_i8x16(), c.as_i8x16()).v128() } } @@ -1980,6 +2146,7 @@ pub fn v128_bitselect(v1: v128, v2: v128, c: v128) -> v128 { #[cfg_attr(test, assert_instr(v128.any_true))] #[target_feature(enable = "simd128")] #[doc(alias("v128.any_true"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn v128_any_true(a: v128) -> bool { unsafe { llvm_any_true_i8x16(a.as_i8x16()) != 0 } } @@ -1989,6 +2156,7 @@ pub fn v128_any_true(a: v128) -> bool { #[cfg_attr(test, assert_instr(i8x16.abs))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.abs"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_abs(a: v128) -> v128 { unsafe { let a = a.as_i8x16(); @@ -2002,6 +2170,7 @@ pub fn i8x16_abs(a: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.neg))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.neg"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_neg(a: v128) -> v128 { unsafe { simd_mul(a.as_i8x16(), simd::i8x16::splat(-1)).v128() } } @@ -2011,10 +2180,12 @@ pub fn i8x16_neg(a: v128) -> v128 { // #[cfg_attr(test, assert_instr(i8x16.popcnt))] // FIXME wasmtime #[target_feature(enable = "simd128")] #[doc(alias("i8x16.popcnt"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_popcnt(v: v128) -> v128 { unsafe { llvm_popcnt(v.as_i8x16()).v128() } } +#[stable(feature = "wasm_simd", since = "1.54.0")] pub use i8x16_popcnt as u8x16_popcnt; /// Returns true if all lanes are nonzero or false if any lane is nonzero. 
@@ -2022,10 +2193,12 @@ pub use i8x16_popcnt as u8x16_popcnt; #[cfg_attr(test, assert_instr(i8x16.all_true))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.all_true"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_all_true(a: v128) -> bool { unsafe { llvm_i8x16_all_true(a.as_i8x16()) != 0 } } +#[stable(feature = "wasm_simd", since = "1.54.0")] pub use i8x16_all_true as u8x16_all_true; /// Extracts the high bit for each lane in `a` and produce a scalar mask with @@ -2034,6 +2207,7 @@ pub use i8x16_all_true as u8x16_all_true; #[cfg_attr(test, assert_instr(i8x16.bitmask))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.bitmask"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_bitmask(a: v128) -> u16 { // FIXME(https://bugs.llvm.org/show_bug.cgi?id=50507) - this produces an // extraneous `i32.and` instruction against a mask of 65535 when converting @@ -2042,6 +2216,7 @@ pub fn i8x16_bitmask(a: v128) -> u16 { unsafe { llvm_bitmask_i8x16(a.as_i8x16()) as u16 } } +#[stable(feature = "wasm_simd", since = "1.54.0")] pub use i8x16_bitmask as u8x16_bitmask; /// Converts two input vectors into a smaller lane vector by narrowing each @@ -2053,6 +2228,7 @@ pub use i8x16_bitmask as u8x16_bitmask; #[cfg_attr(test, assert_instr(i8x16.narrow_i16x8_s))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.narrow_i16x8_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_narrow_i16x8(a: v128, b: v128) -> v128 { unsafe { llvm_narrow_i8x16_s(a.as_i16x8(), b.as_i16x8()).v128() } } @@ -2066,6 +2242,7 @@ pub fn i8x16_narrow_i16x8(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.narrow_i16x8_u))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.narrow_i16x8_u"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u8x16_narrow_i16x8(a: v128, b: v128) -> v128 { unsafe { llvm_narrow_i8x16_u(a.as_i16x8(), b.as_i16x8()).v128() } } @@ -2078,10 +2255,12 @@ pub fn u8x16_narrow_i16x8(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.shl))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.shl"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_shl(a: v128, amt: u32) -> v128 { unsafe { simd_shl(a.as_i8x16(), simd::i8x16::splat(amt as i8)).v128() } } +#[stable(feature = "wasm_simd", since = "1.54.0")] pub use i8x16_shl as u8x16_shl; /// Shifts each lane to the right by the specified number of bits, sign @@ -2093,6 +2272,7 @@ pub use i8x16_shl as u8x16_shl; #[cfg_attr(test, assert_instr(i8x16.shr_s))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.shr_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_shr(a: v128, amt: u32) -> v128 { unsafe { simd_shr(a.as_i8x16(), simd::i8x16::splat(amt as i8)).v128() } } @@ -2106,6 +2286,7 @@ pub fn i8x16_shr(a: v128, amt: u32) -> v128 { #[cfg_attr(test, assert_instr(i8x16.shr_u))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.shr_u"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u8x16_shr(a: v128, amt: u32) -> v128 { unsafe { simd_shr(a.as_u8x16(), simd::u8x16::splat(amt as u8)).v128() } } @@ -2115,10 +2296,12 @@ pub fn u8x16_shr(a: v128, amt: u32) -> v128 { #[cfg_attr(test, assert_instr(i8x16.add))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.add"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_add(a: v128, b: v128) -> v128 { unsafe { simd_add(a.as_i8x16(), b.as_i8x16()).v128() } } +#[stable(feature = "wasm_simd", since = "1.54.0")] pub use i8x16_add as 
u8x16_add; /// Adds two 128-bit vectors as if they were two packed sixteen 8-bit signed @@ -2127,6 +2310,7 @@ pub use i8x16_add as u8x16_add; #[cfg_attr(test, assert_instr(i8x16.add_sat_s))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.add_sat_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_add_sat(a: v128, b: v128) -> v128 { unsafe { llvm_i8x16_add_sat_s(a.as_i8x16(), b.as_i8x16()).v128() } } @@ -2137,6 +2321,7 @@ pub fn i8x16_add_sat(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.add_sat_u))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.add_sat_u"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u8x16_add_sat(a: v128, b: v128) -> v128 { unsafe { llvm_i8x16_add_sat_u(a.as_i8x16(), b.as_i8x16()).v128() } } @@ -2146,10 +2331,12 @@ pub fn u8x16_add_sat(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.sub))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.sub"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_sub(a: v128, b: v128) -> v128 { unsafe { simd_sub(a.as_i8x16(), b.as_i8x16()).v128() } } +#[stable(feature = "wasm_simd", since = "1.54.0")] pub use i8x16_sub as u8x16_sub; /// Subtracts two 128-bit vectors as if they were two packed sixteen 8-bit @@ -2158,6 +2345,7 @@ pub use i8x16_sub as u8x16_sub; #[cfg_attr(test, assert_instr(i8x16.sub_sat_s))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.sub_sat_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_sub_sat(a: v128, b: v128) -> v128 { unsafe { llvm_i8x16_sub_sat_s(a.as_i8x16(), b.as_i8x16()).v128() } } @@ -2168,6 +2356,7 @@ pub fn i8x16_sub_sat(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.sub_sat_u))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.sub_sat_u"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u8x16_sub_sat(a: v128, b: v128) -> v128 { unsafe { llvm_i8x16_sub_sat_u(a.as_i8x16(), b.as_i8x16()).v128() } } @@ -2178,6 +2367,7 @@ pub fn u8x16_sub_sat(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.min_s))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.min_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_min(a: v128, b: v128) -> v128 { let a = a.as_i8x16(); let b = b.as_i8x16(); @@ -2190,6 +2380,7 @@ pub fn i8x16_min(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.min_u))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.min_u"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u8x16_min(a: v128, b: v128) -> v128 { let a = a.as_u8x16(); let b = b.as_u8x16(); @@ -2202,6 +2393,7 @@ pub fn u8x16_min(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.max_s))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.max_s"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn i8x16_max(a: v128, b: v128) -> v128 { let a = a.as_i8x16(); let b = b.as_i8x16(); @@ -2214,6 +2406,7 @@ pub fn i8x16_max(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.max_u))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.max_u"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u8x16_max(a: v128, b: v128) -> v128 { let a = a.as_u8x16(); let b = b.as_u8x16(); @@ -2225,6 +2418,7 @@ pub fn u8x16_max(a: v128, b: v128) -> v128 { #[cfg_attr(test, assert_instr(i8x16.avgr_u))] #[target_feature(enable = "simd128")] #[doc(alias("i8x16.avgr_u"))] +#[stable(feature = "wasm_simd", since = "1.54.0")] pub fn u8x16_avgr(a: v128, b: v128) -> v128 { 
@@ -2225,6 +2418,7 @@ pub fn u8x16_max(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i8x16.avgr_u))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i8x16.avgr_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn u8x16_avgr(a: v128, b: v128) -> v128 {
     unsafe { llvm_avgr_u_i8x16(a.as_i8x16(), b.as_i8x16()).v128() }
 }
@@ -2235,6 +2429,7 @@ pub fn u8x16_avgr(a: v128, b: v128) -> v128 {
 // #[cfg_attr(test, assert_instr(i16x8.extadd_pairwise_i8x16_s))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.extadd_pairwise_i8x16_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_extadd_pairwise_i8x16(a: v128) -> v128 {
     unsafe { llvm_i16x8_extadd_pairwise_i8x16_s(a.as_i8x16()).v128() }
 }
@@ -2245,10 +2440,12 @@ pub fn i16x8_extadd_pairwise_i8x16(a: v128) -> v128 {
 // #[cfg_attr(test, assert_instr(i16x8.extadd_pairwise_i8x16_u))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.extadd_pairwise_i8x16_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_extadd_pairwise_u8x16(a: v128) -> v128 {
     unsafe { llvm_i16x8_extadd_pairwise_i8x16_u(a.as_i8x16()).v128() }
 }

+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i16x8_extadd_pairwise_u8x16 as u16x8_extadd_pairwise_u8x16;

 /// Lane-wise wrapping absolute value.
@@ -2256,6 +2453,7 @@ pub use i16x8_extadd_pairwise_u8x16 as u16x8_extadd_pairwise_u8x16;
 #[cfg_attr(test, assert_instr(i16x8.abs))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.abs"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_abs(a: v128) -> v128 {
     let a = a.as_i16x8();
     let zero = simd::i16x8::splat(0);
@@ -2269,6 +2467,7 @@ pub fn i16x8_abs(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i16x8.neg))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.neg"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_neg(a: v128) -> v128 {
     unsafe { simd_mul(a.as_i16x8(), simd::i16x8::splat(-1)).v128() }
 }
@@ -2278,6 +2477,7 @@ pub fn i16x8_neg(a: v128) -> v128 {
 // #[cfg_attr(test, assert_instr(i16x8.q15mulr_sat_s))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.q15mulr_sat_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_q15mulr_sat(a: v128, b: v128) -> v128 {
     unsafe { llvm_q15mulr(a.as_i16x8(), b.as_i16x8()).v128() }
 }
@@ -2287,10 +2487,12 @@ pub fn i16x8_q15mulr_sat(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i16x8.all_true))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.all_true"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_all_true(a: v128) -> bool {
     unsafe { llvm_i16x8_all_true(a.as_i16x8()) != 0 }
 }

+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i16x8_all_true as u16x8_all_true;

 /// Extracts the high bit for each lane in `a` and produce a scalar mask with
@@ -2299,10 +2501,12 @@ pub use i16x8_all_true as u16x8_all_true;
 #[cfg_attr(test, assert_instr(i16x8.bitmask))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.bitmask"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_bitmask(a: v128) -> u8 {
     unsafe { llvm_bitmask_i16x8(a.as_i16x8()) as u8 }
 }

+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i16x8_bitmask as u16x8_bitmask;

 /// Converts two input vectors into a smaller lane vector by narrowing each
@@ -2314,6 +2518,7 @@ pub use i16x8_bitmask as u16x8_bitmask;
 #[cfg_attr(test, assert_instr(i16x8.narrow_i32x4_s))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.narrow_i32x4_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_narrow_i32x4(a: v128, b: v128) -> v128 {
     unsafe { llvm_narrow_i16x8_s(a.as_i32x4(), b.as_i32x4()).v128() }
 }
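`i16x8_q15mulr_sat` above is the least self-explanatory operation in this batch. Assuming it follows the wasm SIMD spec formula, saturate((a * b + 2^14) >> 15), a one-lane sketch (illustrative only, helper name invented) is:

```rust
// One-lane model of i16x8_q15mulr_sat, assuming the spec formula for
// rounding Q15 fixed-point multiplication.
fn q15mulr_sat_lane(a: i16, b: i16) -> i16 {
    let product = (i32::from(a) * i32::from(b) + (1 << 14)) >> 15; // round to nearest
    product.clamp(i32::from(i16::MIN), i32::from(i16::MAX)) as i16 // saturate
}

fn main() {
    // 0.5 * 0.5 == 0.25 in Q15: 0x4000 * 0x4000 -> 0x2000
    assert_eq!(q15mulr_sat_lane(0x4000, 0x4000), 0x2000);
    // -1.0 * -1.0 overflows the Q15 range and saturates to i16::MAX
    assert_eq!(q15mulr_sat_lane(i16::MIN, i16::MIN), i16::MAX);
}
```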
@@ -2327,6 +2532,7 @@ pub fn i16x8_narrow_i32x4(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i16x8.narrow_i32x4_u))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.narrow_i32x4_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn u16x8_narrow_i32x4(a: v128, b: v128) -> v128 {
     unsafe { llvm_narrow_i16x8_u(a.as_i32x4(), b.as_i32x4()).v128() }
 }
@@ -2337,6 +2543,7 @@ pub fn u16x8_narrow_i32x4(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i16x8.extend_low_i8x16_s))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.extend_low_i8x16_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_extend_low_i8x16(a: v128) -> v128 {
     unsafe {
         simd_cast::<_, simd::i16x8>(simd_shuffle8!(
@@ -2354,6 +2561,7 @@ pub fn i16x8_extend_low_i8x16(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i16x8.extend_high_i8x16_s))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.extend_high_i8x16_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_extend_high_i8x16(a: v128) -> v128 {
     unsafe {
         simd_cast::<_, simd::i16x8>(simd_shuffle8!(
@@ -2371,6 +2579,7 @@ pub fn i16x8_extend_high_i8x16(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i16x8.extend_low_i8x16_u))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.extend_low_i8x16_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_extend_low_u8x16(a: v128) -> v128 {
     unsafe {
         simd_cast::<_, simd::u16x8>(simd_shuffle8!(
@@ -2382,6 +2591,7 @@ pub fn i16x8_extend_low_u8x16(a: v128) -> v128 {
     }
 }

+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i16x8_extend_low_u8x16 as u16x8_extend_low_u8x16;

 /// Converts high half of the smaller lane vector to a larger lane
 /// vector, zero extended.
@@ -2390,6 +2600,7 @@ pub use i16x8_extend_low_u8x16 as u16x8_extend_low_u8x16;
 #[cfg_attr(test, assert_instr(i16x8.extend_high_i8x16_u))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.extend_high_i8x16_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_extend_high_u8x16(a: v128) -> v128 {
     unsafe {
         simd_cast::<_, simd::u16x8>(simd_shuffle8!(
@@ -2401,6 +2612,7 @@ pub fn i16x8_extend_high_u8x16(a: v128) -> v128 {
     }
 }

+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i16x8_extend_high_u8x16 as u16x8_extend_high_u8x16;
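The `extend_low`/`extend_high` pairs above split the sixteen 8-bit lanes in half and widen one half. A scalar sketch of the low, signed case (helper invented for illustration):

```rust
// Model of i16x8_extend_low_i8x16: the low eight i8 lanes are sign-extended
// into eight i16 lanes; the extend_high variant does the same with lanes 8..16.
fn extend_low_i8x16(lanes: [i8; 16]) -> [i16; 8] {
    let mut out = [0i16; 8];
    for (dst, src) in out.iter_mut().zip(&lanes[..8]) {
        *dst = i16::from(*src); // sign-extending conversion
    }
    out
}

fn main() {
    let mut v = [1i8; 16];
    v[0] = -1;
    assert_eq!(extend_low_i8x16(v)[0], -1); // sign preserved, not zero-padded
}
```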
 /// Shifts each lane to the left by the specified number of bits.
@@ -2411,10 +2623,12 @@ pub use i16x8_extend_high_u8x16 as u16x8_extend_high_u8x16;
 #[cfg_attr(test, assert_instr(i16x8.shl))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.shl"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_shl(a: v128, amt: u32) -> v128 {
     unsafe { simd_shl(a.as_i16x8(), simd::i16x8::splat(amt as i16)).v128() }
 }

+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i16x8_shl as u16x8_shl;

 /// Shifts each lane to the right by the specified number of bits, sign
@@ -2426,6 +2640,7 @@ pub use i16x8_shl as u16x8_shl;
 #[cfg_attr(test, assert_instr(i16x8.shr_s))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.shr_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_shr(a: v128, amt: u32) -> v128 {
     unsafe { simd_shr(a.as_i16x8(), simd::i16x8::splat(amt as i16)).v128() }
 }
@@ -2439,6 +2654,7 @@ pub fn i16x8_shr(a: v128, amt: u32) -> v128 {
 #[cfg_attr(test, assert_instr(i16x8.shr_u))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.shr_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn u16x8_shr(a: v128, amt: u32) -> v128 {
     unsafe { simd_shr(a.as_u16x8(), simd::u16x8::splat(amt as u16)).v128() }
 }
@@ -2448,10 +2664,12 @@ pub fn u16x8_shr(a: v128, amt: u32) -> v128 {
 #[cfg_attr(test, assert_instr(i16x8.add))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.add"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_add(a: v128, b: v128) -> v128 {
     unsafe { simd_add(a.as_i16x8(), b.as_i16x8()).v128() }
 }

+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i16x8_add as u16x8_add;

 /// Adds two 128-bit vectors as if they were two packed eight 16-bit signed
@@ -2460,6 +2678,7 @@ pub use i16x8_add as u16x8_add;
 #[cfg_attr(test, assert_instr(i16x8.add_sat_s))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.add_sat_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_add_sat(a: v128, b: v128) -> v128 {
     unsafe { llvm_i16x8_add_sat_s(a.as_i16x8(), b.as_i16x8()).v128() }
 }
@@ -2470,6 +2689,7 @@ pub fn i16x8_add_sat(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i16x8.add_sat_u))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.add_sat_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn u16x8_add_sat(a: v128, b: v128) -> v128 {
     unsafe { llvm_i16x8_add_sat_u(a.as_i16x8(), b.as_i16x8()).v128() }
 }
@@ -2479,10 +2699,12 @@ pub fn u16x8_add_sat(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i16x8.sub))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.sub"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_sub(a: v128, b: v128) -> v128 {
     unsafe { simd_sub(a.as_i16x8(), b.as_i16x8()).v128() }
 }

+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i16x8_sub as u16x8_sub;

 /// Subtracts two 128-bit vectors as if they were two packed eight 16-bit
@@ -2491,6 +2713,7 @@ pub use i16x8_sub as u16x8_sub;
 #[cfg_attr(test, assert_instr(i16x8.sub_sat_s))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.sub_sat_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_sub_sat(a: v128, b: v128) -> v128 {
     unsafe { llvm_i16x8_sub_sat_s(a.as_i16x8(), b.as_i16x8()).v128() }
 }
@@ -2501,6 +2724,7 @@ pub fn i16x8_sub_sat(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i16x8.sub_sat_u))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.sub_sat_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn u16x8_sub_sat(a: v128, b: v128) -> v128 {
     unsafe { llvm_i16x8_sub_sat_u(a.as_i16x8(), b.as_i16x8()).v128() }
 }
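The shift intrinsics above take a `u32` amount even though a lane is only 16 bits wide; per the wasm semantics, the amount is taken modulo the lane width rather than trapping. A one-lane sketch under that assumption (helpers invented):

```rust
// One-lane models of i16x8_shr (arithmetic) and u16x8_shr (logical),
// assuming the shift amount is masked to the lane width as in the wasm spec.
fn i16_shr_lane(a: i16, amt: u32) -> i16 {
    a >> (amt % 16) // replicates the sign bit
}

fn u16_shr_lane(a: u16, amt: u32) -> u16 {
    a >> (amt % 16) // shifts in zeros
}

fn main() {
    assert_eq!(i16_shr_lane(-16, 2), -4); // sign-extending
    assert_eq!(u16_shr_lane(0xFFF0, 2), 0x3FFC); // zero-extending
    assert_eq!(i16_shr_lane(-16, 18), -4); // 18 % 16 == 2
}
```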
@@ -2511,10 +2735,12 @@ pub fn u16x8_sub_sat(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i16x8.mul))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.mul"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_mul(a: v128, b: v128) -> v128 {
     unsafe { simd_mul(a.as_i16x8(), b.as_i16x8()).v128() }
 }

+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i16x8_mul as u16x8_mul;

 /// Compares lane-wise signed integers, and returns the minimum of
@@ -2523,6 +2749,7 @@ pub use i16x8_mul as u16x8_mul;
 #[cfg_attr(test, assert_instr(i16x8.min_s))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.min_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_min(a: v128, b: v128) -> v128 {
     let a = a.as_i16x8();
     let b = b.as_i16x8();
@@ -2535,6 +2762,7 @@ pub fn i16x8_min(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i16x8.min_u))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.min_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn u16x8_min(a: v128, b: v128) -> v128 {
     let a = a.as_u16x8();
     let b = b.as_u16x8();
@@ -2547,6 +2775,7 @@ pub fn u16x8_min(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i16x8.max_s))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.max_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_max(a: v128, b: v128) -> v128 {
     let a = a.as_i16x8();
     let b = b.as_i16x8();
@@ -2559,6 +2788,7 @@ pub fn i16x8_max(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i16x8.max_u))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.max_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn u16x8_max(a: v128, b: v128) -> v128 {
     let a = a.as_u16x8();
     let b = b.as_u16x8();
@@ -2570,6 +2800,7 @@ pub fn u16x8_max(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i16x8.avgr_u))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.avgr_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn u16x8_avgr(a: v128, b: v128) -> v128 {
     unsafe { llvm_avgr_u_i16x8(a.as_i16x8(), b.as_i16x8()).v128() }
 }
@@ -2582,6 +2813,7 @@ pub fn u16x8_avgr(a: v128, b: v128) -> v128 {
 // #[cfg_attr(test, assert_instr(i16x8.extmul_low_i8x16_s))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.extmul_low_i8x16_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_extmul_low_i8x16(a: v128, b: v128) -> v128 {
     unsafe { llvm_i16x8_extmul_low_i8x16_s(a.as_i8x16(), b.as_i8x16()).v128() }
 }
@@ -2594,6 +2826,7 @@ pub fn i16x8_extmul_low_i8x16(a: v128, b: v128) -> v128 {
 // #[cfg_attr(test, assert_instr(i16x8.extmul_high_i8x16_s))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.extmul_high_i8x16_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_extmul_high_i8x16(a: v128, b: v128) -> v128 {
     unsafe { llvm_i16x8_extmul_high_i8x16_s(a.as_i8x16(), b.as_i8x16()).v128() }
 }
@@ -2606,10 +2839,12 @@ pub fn i16x8_extmul_high_i8x16(a: v128, b: v128) -> v128 {
 // #[cfg_attr(test, assert_instr(i16x8.extmul_low_i8x16_u))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.extmul_low_i8x16_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_extmul_low_u8x16(a: v128, b: v128) -> v128 {
     unsafe { llvm_i16x8_extmul_low_i8x16_u(a.as_i8x16(), b.as_i8x16()).v128() }
 }

+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i16x8_extmul_low_u8x16 as u16x8_extmul_low_u8x16;
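The `extmul_{low,high}` family is "widen, then multiply": the product is computed at the doubled lane width, so it cannot overflow. A one-lane sketch (helper invented for illustration):

```rust
// Model of one i16x8_extmul_low_i8x16 lane: both i8 inputs are widened to
// i16 before multiplying, so even i8::MIN * i8::MIN fits in the result.
fn extmul_lane(a: i8, b: i8) -> i16 {
    i16::from(a) * i16::from(b)
}

fn main() {
    // A plain i8 multiply would overflow here; the widened product does not.
    assert_eq!(extmul_lane(i8::MIN, i8::MIN), 16384);
}
```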
 /// Lane-wise integer extended multiplication producing twice wider result than
@@ -2620,10 +2855,12 @@ pub use i16x8_extmul_low_u8x16 as u16x8_extmul_low_u8x16;
 // #[cfg_attr(test, assert_instr(i16x8.extmul_high_i8x16_u))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("i16x8.extmul_high_i8x16_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i16x8_extmul_high_u8x16(a: v128, b: v128) -> v128 {
     unsafe { llvm_i16x8_extmul_high_i8x16_u(a.as_i8x16(), b.as_i8x16()).v128() }
 }

+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i16x8_extmul_high_u8x16 as u16x8_extmul_high_u8x16;

 /// Lane-wise integer extended pairwise addition producing extended results
@@ -2632,6 +2869,7 @@ pub use i16x8_extmul_high_u8x16 as u16x8_extmul_high_u8x16;
 // #[cfg_attr(test, assert_instr(i32x4.extadd_pairwise_i16x8_s))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.extadd_pairwise_i16x8_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_extadd_pairwise_i16x8(a: v128) -> v128 {
     unsafe { llvm_i32x4_extadd_pairwise_i16x8_s(a.as_i16x8()).v128() }
 }
@@ -2642,10 +2880,12 @@ pub fn i32x4_extadd_pairwise_i16x8(a: v128) -> v128 {
 // #[cfg_attr(test, assert_instr(i32x4.extadd_pairwise_i16x8_u))] // FIXME wasmtime
 #[doc(alias("i32x4.extadd_pairwise_i16x8_u"))]
 #[target_feature(enable = "simd128")]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_extadd_pairwise_u16x8(a: v128) -> v128 {
     unsafe { llvm_i32x4_extadd_pairwise_i16x8_u(a.as_i16x8()).v128() }
 }

+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i32x4_extadd_pairwise_u16x8 as u32x4_extadd_pairwise_u16x8;

 /// Lane-wise wrapping absolute value.
@@ -2653,6 +2893,7 @@ pub use i32x4_extadd_pairwise_u16x8 as u32x4_extadd_pairwise_u16x8;
 #[cfg_attr(test, assert_instr(i32x4.abs))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.abs"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_abs(a: v128) -> v128 {
     let a = a.as_i32x4();
     let zero = simd::i32x4::splat(0);
@@ -2666,6 +2907,7 @@ pub fn i32x4_abs(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i32x4.neg))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.neg"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_neg(a: v128) -> v128 {
     unsafe { simd_mul(a.as_i32x4(), simd::i32x4::splat(-1)).v128() }
 }
@@ -2675,10 +2917,12 @@ pub fn i32x4_neg(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i32x4.all_true))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.all_true"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_all_true(a: v128) -> bool {
     unsafe { llvm_i32x4_all_true(a.as_i32x4()) != 0 }
 }

+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i32x4_all_true as u32x4_all_true;

 /// Extracts the high bit for each lane in `a` and produce a scalar mask with
@@ -2687,10 +2931,12 @@ pub use i32x4_all_true as u32x4_all_true;
 #[cfg_attr(test, assert_instr(i32x4.bitmask))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.bitmask"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_bitmask(a: v128) -> u8 {
     unsafe { llvm_bitmask_i32x4(a.as_i32x4()) as u8 }
 }

+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i32x4_bitmask as u32x4_bitmask;
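`extadd_pairwise` sums adjacent lane pairs into lanes of twice the width, halving the lane count. A sketch of the signed i16-to-i32 variant stabilized above (illustrative helper, not the intrinsic itself):

```rust
// Model of i32x4_extadd_pairwise_i16x8: adjacent i16 pairs are sign-extended
// to i32 and added; the widened sum cannot overflow.
fn extadd_pairwise_i16x8(lanes: [i16; 8]) -> [i32; 4] {
    let mut out = [0i32; 4];
    for (i, pair) in lanes.chunks_exact(2).enumerate() {
        out[i] = i32::from(pair[0]) + i32::from(pair[1]);
    }
    out
}

fn main() {
    let v = [i16::MAX, i16::MAX, -1, 1, 0, 0, 7, 8];
    assert_eq!(extadd_pairwise_i16x8(v), [65534, 0, 0, 15]);
}
```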
 /// Converts low half of the smaller lane vector to a larger lane
@@ -2699,6 +2945,7 @@ pub use i32x4_bitmask as u32x4_bitmask;
 #[cfg_attr(test, assert_instr(i32x4.extend_low_i16x8_s))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.extend_low_i16x8_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_extend_low_i16x8(a: v128) -> v128 {
     unsafe {
         simd_cast::<_, simd::i32x4>(simd_shuffle4!(
@@ -2716,6 +2963,7 @@ pub fn i32x4_extend_low_i16x8(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i32x4.extend_high_i16x8_s))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.extend_high_i16x8_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_extend_high_i16x8(a: v128) -> v128 {
     unsafe {
         simd_cast::<_, simd::i32x4>(simd_shuffle4!(
@@ -2733,6 +2981,7 @@ pub fn i32x4_extend_high_i16x8(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i32x4.extend_low_i16x8_u))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.extend_low_i16x8_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_extend_low_u16x8(a: v128) -> v128 {
     unsafe {
         simd_cast::<_, simd::u32x4>(simd_shuffle4!(
@@ -2744,6 +2993,7 @@ pub fn i32x4_extend_low_u16x8(a: v128) -> v128 {
     }
 }

+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i32x4_extend_low_u16x8 as u32x4_extend_low_u16x8;

 /// Converts high half of the smaller lane vector to a larger lane
@@ -2752,6 +3002,7 @@ pub use i32x4_extend_low_u16x8 as u32x4_extend_low_u16x8;
 #[cfg_attr(test, assert_instr(i32x4.extend_high_i16x8_u))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.extend_high_i16x8_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_extend_high_u16x8(a: v128) -> v128 {
     unsafe {
         simd_cast::<_, simd::u32x4>(simd_shuffle4!(
@@ -2763,6 +3014,7 @@ pub fn i32x4_extend_high_u16x8(a: v128) -> v128 {
     }
 }

+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i32x4_extend_high_u16x8 as u32x4_extend_high_u16x8;

 /// Shifts each lane to the left by the specified number of bits.
@@ -2773,10 +3025,12 @@ pub use i32x4_extend_high_u16x8 as u32x4_extend_high_u16x8;
 #[cfg_attr(test, assert_instr(i32x4.shl))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.shl"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_shl(a: v128, amt: u32) -> v128 {
     unsafe { simd_shl(a.as_i32x4(), simd::i32x4::splat(amt as i32)).v128() }
 }

+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i32x4_shl as u32x4_shl;

 /// Shifts each lane to the right by the specified number of bits, sign
@@ -2788,6 +3042,7 @@ pub use i32x4_shl as u32x4_shl;
 #[cfg_attr(test, assert_instr(i32x4.shr_s))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.shr_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_shr(a: v128, amt: u32) -> v128 {
     unsafe { simd_shr(a.as_i32x4(), simd::i32x4::splat(amt as i32)).v128() }
 }
@@ -2801,6 +3056,7 @@ pub fn i32x4_shr(a: v128, amt: u32) -> v128 {
 #[cfg_attr(test, assert_instr(i32x4.shr_u))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.shr_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn u32x4_shr(a: v128, amt: u32) -> v128 {
     unsafe { simd_shr(a.as_u32x4(), simd::u32x4::splat(amt as u32)).v128() }
 }
@@ -2810,10 +3066,12 @@ pub fn u32x4_shr(a: v128, amt: u32) -> v128 {
 #[cfg_attr(test, assert_instr(i32x4.add))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.add"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_add(a: v128, b: v128) -> v128 {
     unsafe { simd_add(a.as_i32x4(), b.as_i32x4()).v128() }
 }

+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i32x4_add as u32x4_add;

 /// Subtracts two 128-bit vectors as if they were two packed four 32-bit integers.
@@ -2821,10 +3079,12 @@ pub use i32x4_add as u32x4_add;
 #[cfg_attr(test, assert_instr(i32x4.sub))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.sub"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_sub(a: v128, b: v128) -> v128 {
     unsafe { simd_sub(a.as_i32x4(), b.as_i32x4()).v128() }
 }

+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i32x4_sub as u32x4_sub;

 /// Multiplies two 128-bit vectors as if they were two packed four 32-bit
@@ -2833,10 +3093,12 @@ pub use i32x4_sub as u32x4_sub;
 #[cfg_attr(test, assert_instr(i32x4.mul))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.mul"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_mul(a: v128, b: v128) -> v128 {
     unsafe { simd_mul(a.as_i32x4(), b.as_i32x4()).v128() }
 }

+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i32x4_mul as u32x4_mul;

 /// Compares lane-wise signed integers, and returns the minimum of
@@ -2845,6 +3107,7 @@ pub use i32x4_mul as u32x4_mul;
 #[cfg_attr(test, assert_instr(i32x4.min_s))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.min_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_min(a: v128, b: v128) -> v128 {
     let a = a.as_i32x4();
     let b = b.as_i32x4();
@@ -2857,6 +3120,7 @@ pub fn i32x4_min(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i32x4.min_u))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.min_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn u32x4_min(a: v128, b: v128) -> v128 {
     let a = a.as_u32x4();
     let b = b.as_u32x4();
@@ -2869,6 +3133,7 @@ pub fn u32x4_min(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i32x4.max_s))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.max_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_max(a: v128, b: v128) -> v128 {
     let a = a.as_i32x4();
     let b = b.as_i32x4();
@@ -2881,6 +3146,7 @@ pub fn i32x4_max(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i32x4.max_u))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.max_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn u32x4_max(a: v128, b: v128) -> v128 {
     let a = a.as_u32x4();
     let b = b.as_u32x4();
@@ -2893,6 +3159,7 @@ pub fn u32x4_max(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i32x4.dot_i16x8_s))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.dot_i16x8_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_dot_i16x8(a: v128, b: v128) -> v128 {
     unsafe { llvm_i32x4_dot_i16x8_s(a.as_i16x8(), b.as_i16x8()).v128() }
 }
@@ -2905,6 +3172,7 @@ pub fn i32x4_dot_i16x8(a: v128, b: v128) -> v128 {
 // #[cfg_attr(test, assert_instr(i32x4.extmul_low_i16x8_s))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.extmul_low_i16x8_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_extmul_low_i16x8(a: v128, b: v128) -> v128 {
     unsafe { llvm_i32x4_extmul_low_i16x8_s(a.as_i16x8(), b.as_i16x8()).v128() }
 }
@@ -2917,6 +3185,7 @@ pub fn i32x4_extmul_low_i16x8(a: v128, b: v128) -> v128 {
 // #[cfg_attr(test, assert_instr(i32x4.extmul_high_i16x8_s))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.extmul_high_i16x8_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_extmul_high_i16x8(a: v128, b: v128) -> v128 {
     unsafe { llvm_i32x4_extmul_high_i16x8_s(a.as_i16x8(), b.as_i16x8()).v128() }
 }
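`i32x4_dot_i16x8` combines two ideas from earlier hunks: lane-wise widened products, then pairwise sums. A scalar sketch (helper invented for illustration):

```rust
// Model of i32x4_dot_i16x8: multiply corresponding i16 lanes at i32 width,
// then add each adjacent pair of products into one i32 lane.
fn dot_i16x8(a: [i16; 8], b: [i16; 8]) -> [i32; 4] {
    let mut out = [0i32; 4];
    for i in 0..4 {
        out[i] = i32::from(a[2 * i]) * i32::from(b[2 * i])
            + i32::from(a[2 * i + 1]) * i32::from(b[2 * i + 1]);
    }
    out
}

fn main() {
    let a = [1, 2, 3, 4, 5, 6, 7, 8];
    let b = [1; 8];
    assert_eq!(dot_i16x8(a, b), [3, 7, 11, 15]); // pairwise sums of products
}
```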
@@ -2929,10 +3198,12 @@ pub fn i32x4_extmul_high_i16x8(a: v128, b: v128) -> v128 {
 // #[cfg_attr(test, assert_instr(i32x4.extmul_low_i16x8_u))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.extmul_low_i16x8_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_extmul_low_u16x8(a: v128, b: v128) -> v128 {
     unsafe { llvm_i32x4_extmul_low_i16x8_u(a.as_i16x8(), b.as_i16x8()).v128() }
 }

+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i32x4_extmul_low_u16x8 as u32x4_extmul_low_u16x8;

 /// Lane-wise integer extended multiplication producing twice wider result than
@@ -2943,10 +3214,12 @@ pub use i32x4_extmul_low_u16x8 as u32x4_extmul_low_u16x8;
 // #[cfg_attr(test, assert_instr(i32x4.extmul_high_i16x8_u))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.extmul_high_i16x8_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_extmul_high_u16x8(a: v128, b: v128) -> v128 {
     unsafe { llvm_i32x4_extmul_high_i16x8_u(a.as_i16x8(), b.as_i16x8()).v128() }
 }

+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i32x4_extmul_high_u16x8 as u32x4_extmul_high_u16x8;

 /// Lane-wise wrapping absolute value.
@@ -2954,6 +3227,7 @@ pub use i32x4_extmul_high_u16x8 as u32x4_extmul_high_u16x8;
 // #[cfg_attr(test, assert_instr(i64x2.abs))] // FIXME llvm
 #[target_feature(enable = "simd128")]
 #[doc(alias("i64x2.abs"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i64x2_abs(a: v128) -> v128 {
     let a = a.as_i64x2();
     let zero = simd::i64x2::splat(0);
@@ -2967,6 +3241,7 @@ pub fn i64x2_abs(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i64x2.neg))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i64x2.neg"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i64x2_neg(a: v128) -> v128 {
     unsafe { simd_mul(a.as_i64x2(), simd::i64x2::splat(-1)).v128() }
 }
@@ -2976,10 +3251,12 @@ pub fn i64x2_neg(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i64x2.all_true))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i64x2.all_true"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i64x2_all_true(a: v128) -> bool {
     unsafe { llvm_i64x2_all_true(a.as_i64x2()) != 0 }
 }

+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i64x2_all_true as u64x2_all_true;

 /// Extracts the high bit for each lane in `a` and produce a scalar mask with
@@ -2988,10 +3265,12 @@ pub use i64x2_all_true as u64x2_all_true;
 #[cfg_attr(test, assert_instr(i64x2.bitmask))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i64x2.bitmask"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i64x2_bitmask(a: v128) -> u8 {
     unsafe { llvm_bitmask_i64x2(a.as_i64x2()) as u8 }
 }

+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i64x2_bitmask as u64x2_bitmask;

 /// Converts low half of the smaller lane vector to a larger lane
@@ -3000,6 +3279,7 @@ pub use i64x2_bitmask as u64x2_bitmask;
 // #[cfg_attr(test, assert_instr(i64x2.extend_low_i32x4_s))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("i64x2.extend_low_i32x4_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i64x2_extend_low_i32x4(a: v128) -> v128 {
     unsafe {
         simd_cast::<_, simd::i64x2>(simd_shuffle2!(a.as_i32x4(), a.as_i32x4(), [0, 1]))
@@ -3013,6 +3293,7 @@ pub fn i64x2_extend_low_i32x4(a: v128) -> v128 {
 // #[cfg_attr(test, assert_instr(i64x2.extend_high_i32x4_s))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("i64x2.extend_high_i32x4_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i64x2_extend_high_i32x4(a: v128) -> v128 {
     unsafe {
         simd_cast::<_, simd::i64x2>(simd_shuffle2!(a.as_i32x4(), a.as_i32x4(), [2, 3]))
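The two horizontal reductions stabilized in this stretch are easy to model in scalar code: `all_true` is an AND of "lane is non-zero", and `bitmask` packs the sign bit of lane `i` into bit `i` of the result. A sketch for the two-lane case (helpers invented for illustration):

```rust
// Models of i64x2_all_true and i64x2_bitmask.
fn all_true(lanes: [i64; 2]) -> bool {
    lanes.iter().all(|&x| x != 0)
}

fn bitmask(lanes: [i64; 2]) -> u8 {
    lanes
        .iter()
        .enumerate()
        .fold(0, |m, (i, &x)| m | ((x < 0) as u8) << i) // sign bit of lane i -> bit i
}

fn main() {
    assert!(!all_true([1, 0]));
    assert_eq!(bitmask([-1, 1]), 0b01);
}
```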
@@ -3026,6 +3307,7 @@ pub fn i64x2_extend_high_i32x4(a: v128) -> v128 {
 // #[cfg_attr(test, assert_instr(i64x2.extend_low_i32x4_u))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("i64x2.extend_low_i32x4_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i64x2_extend_low_u32x4(a: v128) -> v128 {
     unsafe {
         simd_cast::<_, simd::u64x2>(simd_shuffle2!(a.as_u32x4(), a.as_u32x4(), [0, 1]))
@@ -3033,6 +3315,7 @@ pub fn i64x2_extend_low_u32x4(a: v128) -> v128 {
     }
 }

+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i64x2_extend_low_u32x4 as u64x2_extend_low_u32x4;

 /// Converts high half of the smaller lane vector to a larger lane
@@ -3041,6 +3324,7 @@ pub use i64x2_extend_low_u32x4 as u64x2_extend_low_u32x4;
 // #[cfg_attr(test, assert_instr(i64x2.extend_high_i32x4_u))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("i64x2.extend_high_i32x4_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i64x2_extend_high_u32x4(a: v128) -> v128 {
     unsafe {
         simd_cast::<_, simd::u64x2>(simd_shuffle2!(a.as_u32x4(), a.as_u32x4(), [2, 3]))
@@ -3048,6 +3332,7 @@ pub fn i64x2_extend_high_u32x4(a: v128) -> v128 {
     }
 }

+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i64x2_extend_high_u32x4 as u64x2_extend_high_u32x4;

 /// Shifts each lane to the left by the specified number of bits.
@@ -3058,10 +3343,12 @@ pub use i64x2_extend_high_u32x4 as u64x2_extend_high_u32x4;
 #[cfg_attr(test, assert_instr(i64x2.shl))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i64x2.shl"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i64x2_shl(a: v128, amt: u32) -> v128 {
     unsafe { simd_shl(a.as_i64x2(), simd::i64x2::splat(amt as i64)).v128() }
 }

+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i64x2_shl as u64x2_shl;

 /// Shifts each lane to the right by the specified number of bits, sign
@@ -3073,6 +3360,7 @@ pub use i64x2_shl as u64x2_shl;
 #[cfg_attr(test, assert_instr(i64x2.shr_s))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i64x2.shr_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i64x2_shr(a: v128, amt: u32) -> v128 {
     unsafe { simd_shr(a.as_i64x2(), simd::i64x2::splat(amt as i64)).v128() }
 }
@@ -3086,6 +3374,7 @@ pub fn i64x2_shr(a: v128, amt: u32) -> v128 {
 #[cfg_attr(test, assert_instr(i64x2.shr_u))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i64x2.shr_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn u64x2_shr(a: v128, amt: u32) -> v128 {
     unsafe { simd_shr(a.as_u64x2(), simd::u64x2::splat(amt as u64)).v128() }
 }
@@ -3095,10 +3384,12 @@ pub fn u64x2_shr(a: v128, amt: u32) -> v128 {
 #[cfg_attr(test, assert_instr(i64x2.add))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i64x2.add"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i64x2_add(a: v128, b: v128) -> v128 {
     unsafe { simd_add(a.as_i64x2(), b.as_i64x2()).v128() }
 }

+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i64x2_add as u64x2_add;

 /// Subtracts two 128-bit vectors as if they were two packed two 64-bit integers.
@@ -3106,10 +3397,12 @@ pub use i64x2_add as u64x2_add;
 #[cfg_attr(test, assert_instr(i64x2.sub))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i64x2.sub"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i64x2_sub(a: v128, b: v128) -> v128 {
     unsafe { simd_sub(a.as_i64x2(), b.as_i64x2()).v128() }
 }

+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i64x2_sub as u64x2_sub;

 /// Multiplies two 128-bit vectors as if they were two packed two 64-bit integers.
@@ -3117,10 +3410,12 @@ pub use i64x2_sub as u64x2_sub;
 #[cfg_attr(test, assert_instr(i64x2.mul))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i64x2.mul"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i64x2_mul(a: v128, b: v128) -> v128 {
     unsafe { simd_mul(a.as_i64x2(), b.as_i64x2()).v128() }
 }

+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i64x2_mul as u64x2_mul;

 /// Lane-wise integer extended multiplication producing twice wider result than
@@ -3131,6 +3426,7 @@ pub use i64x2_mul as u64x2_mul;
 // #[cfg_attr(test, assert_instr(i64x2.extmul_low_i32x4_s))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("i64x2.extmul_low_i32x4_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i64x2_extmul_low_i32x4(a: v128, b: v128) -> v128 {
     unsafe { llvm_i64x2_extmul_low_i32x4_s(a.as_i32x4(), b.as_i32x4()).v128() }
 }
@@ -3143,6 +3439,7 @@ pub fn i64x2_extmul_low_i32x4(a: v128, b: v128) -> v128 {
 // #[cfg_attr(test, assert_instr(i64x2.extmul_high_i32x4_s))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("i64x2.extmul_high_i32x4_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i64x2_extmul_high_i32x4(a: v128, b: v128) -> v128 {
     unsafe { llvm_i64x2_extmul_high_i32x4_s(a.as_i32x4(), b.as_i32x4()).v128() }
 }
@@ -3155,10 +3452,12 @@ pub fn i64x2_extmul_high_i32x4(a: v128, b: v128) -> v128 {
 // #[cfg_attr(test, assert_instr(i64x2.extmul_low_i32x4_u))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("i64x2.extmul_low_i32x4_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i64x2_extmul_low_u32x4(a: v128, b: v128) -> v128 {
     unsafe { llvm_i64x2_extmul_low_i32x4_u(a.as_i32x4(), b.as_i32x4()).v128() }
 }

+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i64x2_extmul_low_u32x4 as u64x2_extmul_low_u32x4;

 /// Lane-wise integer extended multiplication producing twice wider result than
@@ -3169,10 +3468,12 @@ pub use i64x2_extmul_low_u32x4 as u64x2_extmul_low_u32x4;
 // #[cfg_attr(test, assert_instr(i64x2.extmul_high_i32x4_u))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("i64x2.extmul_high_i32x4_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i64x2_extmul_high_u32x4(a: v128, b: v128) -> v128 {
     unsafe { llvm_i64x2_extmul_high_i32x4_u(a.as_i32x4(), b.as_i32x4()).v128() }
 }

+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub use i64x2_extmul_high_u32x4 as u64x2_extmul_high_u32x4;
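Of the four f32x4 rounding modes stabilized next, `nearest` is the only one without a direct `f32` method: it rounds ties to even (IEEE roundTiesToEven), whereas `f32::round` rounds ties away from zero. A one-lane sketch under that assumed semantics (fine for moderate magnitudes):

```rust
// One-lane model of f32x4_nearest: round to nearest, ties to even.
// f32::ceil / floor / trunc model the other three intrinsics directly.
fn nearest_lane(x: f32) -> f32 {
    let f = x.floor();
    let diff = x - f;
    if diff < 0.5 {
        f
    } else if diff > 0.5 {
        f + 1.0
    } else if (f as i64) % 2 == 0 {
        f // exact tie: take the even neighbor (ignores very large magnitudes)
    } else {
        f + 1.0
    }
}

fn main() {
    assert_eq!(nearest_lane(2.5), 2.0); // f32::round would give 3.0
    assert_eq!(nearest_lane(3.5), 4.0);
    assert_eq!(nearest_lane(-2.5), -2.0);
}
```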
 /// Lane-wise rounding to the nearest integral value not smaller than the input.
@@ -3180,6 +3481,7 @@ pub use i64x2_extmul_high_u32x4 as u64x2_extmul_high_u32x4;
 #[cfg_attr(test, assert_instr(f32x4.ceil))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f32x4.ceil"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f32x4_ceil(a: v128) -> v128 {
     unsafe { llvm_f32x4_ceil(a.as_f32x4()).v128() }
 }
@@ -3189,6 +3491,7 @@ pub fn f32x4_ceil(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f32x4.floor))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f32x4.floor"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f32x4_floor(a: v128) -> v128 {
     unsafe { llvm_f32x4_floor(a.as_f32x4()).v128() }
 }
@@ -3199,6 +3502,7 @@ pub fn f32x4_floor(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f32x4.trunc))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f32x4.trunc"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f32x4_trunc(a: v128) -> v128 {
     unsafe { llvm_f32x4_trunc(a.as_f32x4()).v128() }
 }
@@ -3209,6 +3513,7 @@ pub fn f32x4_trunc(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f32x4.nearest))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f32x4.nearest"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f32x4_nearest(a: v128) -> v128 {
     unsafe { llvm_f32x4_nearest(a.as_f32x4()).v128() }
 }
@@ -3219,6 +3524,7 @@ pub fn f32x4_nearest(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f32x4.abs))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f32x4.abs"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f32x4_abs(a: v128) -> v128 {
     unsafe { llvm_f32x4_abs(a.as_f32x4()).v128() }
 }
@@ -3229,6 +3535,7 @@ pub fn f32x4_abs(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f32x4.neg))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f32x4.neg"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f32x4_neg(a: v128) -> v128 {
     f32x4_mul(a, f32x4_splat(-1.))
 }
@@ -3239,6 +3546,7 @@ pub fn f32x4_neg(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f32x4.sqrt))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f32x4.sqrt"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f32x4_sqrt(a: v128) -> v128 {
     unsafe { llvm_f32x4_sqrt(a.as_f32x4()).v128() }
 }
@@ -3249,6 +3557,7 @@ pub fn f32x4_sqrt(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f32x4.add))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f32x4.add"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f32x4_add(a: v128, b: v128) -> v128 {
     unsafe { simd_add(a.as_f32x4(), b.as_f32x4()).v128() }
 }
@@ -3259,6 +3568,7 @@ pub fn f32x4_add(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f32x4.sub))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f32x4.sub"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f32x4_sub(a: v128, b: v128) -> v128 {
     unsafe { simd_sub(a.as_f32x4(), b.as_f32x4()).v128() }
 }
@@ -3269,6 +3579,7 @@ pub fn f32x4_sub(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f32x4.mul))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f32x4.mul"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f32x4_mul(a: v128, b: v128) -> v128 {
     unsafe { simd_mul(a.as_f32x4(), b.as_f32x4()).v128() }
 }
@@ -3279,6 +3590,7 @@ pub fn f32x4_mul(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f32x4.div))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f32x4.div"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f32x4_div(a: v128, b: v128) -> v128 {
     unsafe { simd_div(a.as_f32x4(), b.as_f32x4()).v128() }
 }
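The `min`/`max` versus `pmin`/`pmax` intrinsics that follow differ only in edge cases. If I read the wasm spec right, `pmin` is literally `if b < a { b } else { a }`, which makes it asymmetric around NaN and signed zero, while `min` propagates NaN. A one-lane contrast under that assumption:

```rust
// One-lane model of f32x4_pmin ("pseudo-minimum"): whenever the comparison
// is false, including any NaN comparison, the first operand wins.
fn pmin_lane(a: f32, b: f32) -> f32 {
    if b < a { b } else { a }
}

fn main() {
    assert!(pmin_lane(f32::NAN, 1.0).is_nan()); // NaN comparison is false -> a
    assert_eq!(pmin_lane(1.0, f32::NAN), 1.0); // NaN comparison is false -> a
    assert_eq!(pmin_lane(-0.0, 0.0).to_bits(), (-0.0f32).to_bits()); // a again
}
```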
@@ -3289,6 +3601,7 @@ pub fn f32x4_div(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f32x4.min))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f32x4.min"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f32x4_min(a: v128, b: v128) -> v128 {
     unsafe { llvm_f32x4_min(a.as_f32x4(), b.as_f32x4()).v128() }
 }
@@ -3299,6 +3612,7 @@ pub fn f32x4_min(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f32x4.max))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f32x4.max"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f32x4_max(a: v128, b: v128) -> v128 {
     unsafe { llvm_f32x4_max(a.as_f32x4(), b.as_f32x4()).v128() }
 }
@@ -3308,6 +3622,7 @@ pub fn f32x4_max(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f32x4.pmin))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f32x4.pmin"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f32x4_pmin(a: v128, b: v128) -> v128 {
     unsafe { llvm_f32x4_pmin(a.as_f32x4(), b.as_f32x4()).v128() }
 }
@@ -3317,6 +3632,7 @@ pub fn f32x4_pmin(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f32x4.pmax))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f32x4.pmax"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f32x4_pmax(a: v128, b: v128) -> v128 {
     unsafe { llvm_f32x4_pmax(a.as_f32x4(), b.as_f32x4()).v128() }
 }
@@ -3326,6 +3642,7 @@ pub fn f32x4_pmax(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f64x2.ceil))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f64x2.ceil"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f64x2_ceil(a: v128) -> v128 {
     unsafe { llvm_f64x2_ceil(a.as_f64x2()).v128() }
 }
@@ -3335,6 +3652,7 @@ pub fn f64x2_ceil(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f64x2.floor))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f64x2.floor"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f64x2_floor(a: v128) -> v128 {
     unsafe { llvm_f64x2_floor(a.as_f64x2()).v128() }
 }
@@ -3345,6 +3663,7 @@ pub fn f64x2_floor(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f64x2.trunc))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f64x2.trunc"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f64x2_trunc(a: v128) -> v128 {
     unsafe { llvm_f64x2_trunc(a.as_f64x2()).v128() }
 }
@@ -3355,6 +3674,7 @@ pub fn f64x2_trunc(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f64x2.nearest))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f64x2.nearest"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f64x2_nearest(a: v128) -> v128 {
     unsafe { llvm_f64x2_nearest(a.as_f64x2()).v128() }
 }
@@ -3365,6 +3685,7 @@ pub fn f64x2_nearest(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f64x2.abs))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f64x2.abs"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f64x2_abs(a: v128) -> v128 {
     unsafe { llvm_f64x2_abs(a.as_f64x2()).v128() }
 }
@@ -3375,6 +3696,7 @@ pub fn f64x2_abs(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f64x2.neg))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f64x2.neg"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f64x2_neg(a: v128) -> v128 {
     f64x2_mul(a, f64x2_splat(-1.0))
 }
@@ -3385,6 +3707,7 @@ pub fn f64x2_neg(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f64x2.sqrt))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f64x2.sqrt"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f64x2_sqrt(a: v128) -> v128 {
     unsafe { llvm_f64x2_sqrt(a.as_f64x2()).v128() }
 }
@@ -3395,6 +3718,7 @@ pub fn f64x2_sqrt(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f64x2.add))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f64x2.add"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f64x2_add(a: v128, b: v128) -> v128 {
     unsafe { simd_add(a.as_f64x2(), b.as_f64x2()).v128() }
 }
@@ -3405,6 +3729,7 @@ pub fn f64x2_add(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f64x2.sub))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f64x2.sub"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f64x2_sub(a: v128, b: v128) -> v128 {
     unsafe { simd_sub(a.as_f64x2(), b.as_f64x2()).v128() }
 }
@@ -3415,6 +3740,7 @@ pub fn f64x2_sub(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f64x2.mul))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f64x2.mul"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f64x2_mul(a: v128, b: v128) -> v128 {
     unsafe { simd_mul(a.as_f64x2(), b.as_f64x2()).v128() }
 }
@@ -3425,6 +3751,7 @@ pub fn f64x2_mul(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f64x2.div))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f64x2.div"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f64x2_div(a: v128, b: v128) -> v128 {
     unsafe { simd_div(a.as_f64x2(), b.as_f64x2()).v128() }
 }
@@ -3435,6 +3762,7 @@ pub fn f64x2_div(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f64x2.min))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f64x2.min"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f64x2_min(a: v128, b: v128) -> v128 {
     unsafe { llvm_f64x2_min(a.as_f64x2(), b.as_f64x2()).v128() }
 }
@@ -3445,6 +3773,7 @@ pub fn f64x2_min(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f64x2.max))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f64x2.max"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f64x2_max(a: v128, b: v128) -> v128 {
     unsafe { llvm_f64x2_max(a.as_f64x2(), b.as_f64x2()).v128() }
 }
@@ -3454,6 +3783,7 @@ pub fn f64x2_max(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f64x2.pmin))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f64x2.pmin"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f64x2_pmin(a: v128, b: v128) -> v128 {
     unsafe { llvm_f64x2_pmin(a.as_f64x2(), b.as_f64x2()).v128() }
 }
@@ -3463,6 +3793,7 @@ pub fn f64x2_pmin(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f64x2.pmax))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f64x2.pmax"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f64x2_pmax(a: v128, b: v128) -> v128 {
     unsafe { llvm_f64x2_pmax(a.as_f64x2(), b.as_f64x2()).v128() }
 }
@@ -3476,6 +3807,7 @@ pub fn f64x2_pmax(a: v128, b: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i32x4.trunc_sat_f32x4_s))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.trunc_sat_f32x4_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_trunc_sat_f32x4(a: v128) -> v128 {
     unsafe { llvm_i32x4_trunc_sat_f32x4_s(a.as_f32x4()).v128() }
 }
@@ -3489,6 +3821,7 @@ pub fn i32x4_trunc_sat_f32x4(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(i32x4.trunc_sat_f32x4_u))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.trunc_sat_f32x4_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn u32x4_trunc_sat_f32x4(a: v128) -> v128 {
     unsafe { llvm_i32x4_trunc_sat_f32x4_u(a.as_f32x4()).v128() }
 }
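The `trunc_sat` conversions just stabilized never trap: out-of-range values clamp and NaN becomes zero. Conveniently, Rust's scalar `as` casts from float to int have had the same saturating behavior since 1.45, so a one-lane model is just a cast:

```rust
// One-lane model of i32x4_trunc_sat_f32x4 via Rust's saturating float->int cast.
fn trunc_sat_lane(x: f32) -> i32 {
    x as i32
}

fn main() {
    assert_eq!(trunc_sat_lane(3.9), 3); // truncates toward zero
    assert_eq!(trunc_sat_lane(f32::NAN), 0); // NaN maps to 0
    assert_eq!(trunc_sat_lane(1e10), i32::MAX); // saturates, no trap
    assert_eq!(trunc_sat_lane(-1e10), i32::MIN);
}
```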
@@ -3499,6 +3832,7 @@ pub fn u32x4_trunc_sat_f32x4(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f32x4.convert_i32x4_s))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f32x4.convert_i32x4_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f32x4_convert_i32x4(a: v128) -> v128 {
     unsafe { simd_cast::<_, simd::f32x4>(a.as_i32x4()).v128() }
 }
@@ -3509,6 +3843,7 @@ pub fn f32x4_convert_i32x4(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f32x4.convert_i32x4_u))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f32x4.convert_i32x4_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f32x4_convert_u32x4(a: v128) -> v128 {
     unsafe { simd_cast::<_, simd::f32x4>(a.as_u32x4()).v128() }
 }
@@ -3525,6 +3860,7 @@ pub fn f32x4_convert_u32x4(a: v128) -> v128 {
 // #[cfg_attr(test, assert_instr(i32x4.trunc_sat_f64x2_s_zero))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.trunc_sat_f64x2_s_zero"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn i32x4_trunc_sat_f64x2_zero(a: v128) -> v128 {
     let ret: simd::i32x4 = unsafe {
         simd_shuffle4!(
@@ -3548,6 +3884,7 @@ pub fn i32x4_trunc_sat_f64x2_zero(a: v128) -> v128 {
 // #[cfg_attr(test, assert_instr(i32x4.trunc_sat_f64x2_u_zero))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("i32x4.trunc_sat_f64x2_u_zero"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn u32x4_trunc_sat_f64x2_zero(a: v128) -> v128 {
     let ret: simd::i32x4 = unsafe {
         simd_shuffle4!(
@@ -3564,6 +3901,7 @@ pub fn u32x4_trunc_sat_f64x2_zero(a: v128) -> v128 {
 #[cfg_attr(test, assert_instr(f64x2.convert_low_i32x4_s))]
 #[target_feature(enable = "simd128")]
 #[doc(alias("f64x2.convert_low_i32x4_s"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f64x2_convert_low_i32x4(a: v128) -> v128 {
     unsafe {
         simd_cast::<_, simd::f64x2>(simd_shuffle2!(a.as_i32x4(), a.as_i32x4(), [0, 1],))
@@ -3576,6 +3914,7 @@ pub fn f64x2_convert_low_i32x4(a: v128) -> v128 {
 // #[cfg_attr(test, assert_instr(f64x2.convert_low_i32x4_u))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("f64x2.convert_low_i32x4_u"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f64x2_convert_low_u32x4(a: v128) -> v128 {
     unsafe {
         simd_cast::<_, simd::f64x2>(simd_shuffle2!(a.as_u32x4(), a.as_u32x4(), [0, 1],))
@@ -3592,6 +3931,7 @@ pub fn f64x2_convert_low_u32x4(a: v128) -> v128 {
 // #[cfg_attr(test, assert_instr(f32x4.demote_f64x2_zero))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("f32x4.demote_f64x2_zero"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f32x4_demote_f64x2_zero(a: v128) -> v128 {
     unsafe { llvm_f32x4_demote_f64x2_zero(a.as_f64x2()).v128() }
 }
@@ -3602,6 +3942,7 @@ pub fn f32x4_demote_f64x2_zero(a: v128) -> v128 {
 // #[cfg_attr(test, assert_instr(f64x2.promote_low_f32x4))] // FIXME wasmtime
 #[target_feature(enable = "simd128")]
 #[doc(alias("f64x2.promote_low_f32x4"))]
+#[stable(feature = "wasm_simd", since = "1.54.0")]
 pub fn f64x2_promote_low_f32x4(a: v128) -> v128 {
     unsafe { llvm_f64x2_promote_low_f32x4(a.as_f32x4()).v128() }
 }
@@ -3739,6 +4080,18 @@ pub mod tests {
         const _: v128 = i64x2(0, 1);
         const _: v128 = f32x4(0., 1., 2., 3.);
         const _: v128 = f64x2(0., 1.);
+
+        let bytes: [i16; 8] = unsafe { mem::transmute(i16x8(-1, -2, -3, -4, -5, -6, -7, -8)) };
+        assert_eq!(bytes, [-1, -2, -3, -4, -5, -6, -7, -8]);
+        let bytes: [i8; 16] = unsafe {
+            mem::transmute(i8x16(
+                -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16,
+            ))
+        };
+        assert_eq!(
+            bytes,
+            [-1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16]
+        );
     }

     #[test]
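The new tests above double as a usage hint: with `wasm_simd_const`, the lane constructors are usable in `const` position, and `transmute` recovers the lanes byte-for-byte. A standalone sketch of the same check, assuming the constructors are re-exported from `core::arch::wasm32` (it only compiles for the wasm32 target with `simd128` enabled):

```rust
// Hypothetical standalone version of the new test: build a v128 in const
// position, then inspect its lanes through a transmute.
#[cfg(target_arch = "wasm32")]
fn lanes_round_trip() {
    use core::arch::wasm32::*;
    use core::mem;

    const V: v128 = i16x8(-1, -2, -3, -4, -5, -6, -7, -8); // const-capable constructor
    let lanes: [i16; 8] = unsafe { mem::transmute(V) };
    assert_eq!(lanes, [-1, -2, -3, -4, -5, -6, -7, -8]);
}

fn main() {
    #[cfg(target_arch = "wasm32")]
    lanes_round_trip();
}
```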
diff --git a/examples/hex.rs b/examples/hex.rs
index 8d41517f33..812836d661 100644
--- a/examples/hex.rs
+++ b/examples/hex.rs
@@ -14,7 +14,6 @@
 #![feature(stdsimd, wasm_target_feature)]
 #![cfg_attr(test, feature(test))]
-#![cfg_attr(target_arch = "wasm32", feature(wasm_simd))]
 #![allow(
     clippy::unwrap_used,
     clippy::print_stdout,